From: Naohiro Aota <naohiro.aota@wdc.com>
To: linux-btrfs@vger.kernel.org, David Sterba <dsterba@suse.com>
Cc: Chris Mason <clm@fb.com>, Josef Bacik <josef@toxicpanda.com>,
	Hannes Reinecke <hare@suse.com>,
	linux-fsdevel@vger.kernel.org,
	Naohiro Aota <naohiro.aota@wdc.com>
Subject: [PATCH v7 13/39] btrfs: load zone's allocation offset
Date: Fri, 11 Sep 2020 21:32:33 +0900
Message-ID: <20200911123259.3782926-14-naohiro.aota@wdc.com>
In-Reply-To: <20200911123259.3782926-1-naohiro.aota@wdc.com>

Zoned btrfs must allocate blocks at the zones' write pointer. The device's
write pointer position can be mapped to a logical address within a block
group. This commit adds "alloc_offset" to struct btrfs_block_group to track
that logical address.

This logical address is populated in btrfs_load_block_group_zone_info()
from the write pointers of the corresponding zones.
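
For illustration (hypothetical numbers, not part of the patch): if a zone
starts at sector 0x800000 and its write pointer is at sector 0x800800, the
value recorded is

    alloc_offset = (0x800800 - 0x800000) << SECTOR_SHIFT = 0x100000 bytes

and the next allocatable logical address in that block group is
cache->start + 0x100000.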

For now, zoned btrfs only supports the SINGLE profile. Supporting non-SINGLE
profiles with zone append writes is not trivial. For example, in the DUP
profile, we send a zone append write to two zones on a device. The device
replies with the written LBA for each IO. If the offsets of the returned
addresses from the beginning of their zones differ, the writes end up at
different logical addresses.
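
As a hypothetical illustration with invented numbers: the device may complete
the two appends 0x10000 bytes into zone A but 0x20000 bytes into zone B. The
two copies then sit at different offsets within their zones, so a single
block-group-relative logical address cannot describe both physical locations.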

We need a fine-grained logical-to-physical mapping to support such diverging
physical addresses. Since that would require an additional metadata type,
disable non-SINGLE profiles for now.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/block-group.c |  15 ++++
 fs/btrfs/block-group.h |   6 ++
 fs/btrfs/zoned.c       | 153 +++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/zoned.h       |   6 ++
 4 files changed, 180 insertions(+)

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 4ac4aacfae04..3ce685a10631 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -15,6 +15,7 @@
 #include "delalloc-space.h"
 #include "discard.h"
 #include "raid56.h"
+#include "zoned.h"
 
 /*
  * Return target flags in extended format or 0 if restripe for this chunk_type
@@ -1945,6 +1946,13 @@ static int read_one_block_group(struct btrfs_fs_info *info,
 			goto error;
 	}
 
+	ret = btrfs_load_block_group_zone_info(cache);
+	if (ret) {
+		btrfs_err(info, "failed to load zone info of bg %llu",
+			  cache->start);
+		goto error;
+	}
+
 	/*
 	 * We need to exclude the super stripes now so that the space info has
 	 * super bytes accounted for, otherwise we'll think we have more space
@@ -2148,6 +2156,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
 	cache->needs_free_space = 1;
+
+	ret = btrfs_load_block_group_zone_info(cache);
+	if (ret) {
+		btrfs_put_block_group(cache);
+		return ret;
+	}
+
 	ret = exclude_super_stripes(cache);
 	if (ret) {
 		/* We may have excluded something, so call this just in case */
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index adfd7583a17b..14e3043c9ce7 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -183,6 +183,12 @@ struct btrfs_block_group {
 
 	/* Record locked full stripes for RAID5/6 block group */
 	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
+
+	/*
+	 * Allocation offset for the block group to implement sequential
+	 * allocation. This is used only with ZONED mode enabled.
+	 */
+	u64 alloc_offset;
 };
 
 static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 916d358dea27..cc6bc45729b4 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -11,14 +11,20 @@
 #include "linux/kernel.h"
 #include <linux/slab.h>
 #include <linux/blkdev.h>
+#include <linux/sched/mm.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "zoned.h"
 #include "rcu-string.h"
 #include "disk-io.h"
+#include "block-group.h"
 
 /* Maximum number of zones to report per blkdev_report_zones() call */
 #define BTRFS_REPORT_NR_ZONES   4096
+/* Invalid allocation pointer value for missing devices */
+#define WP_MISSING_DEV ((u64)-1)
+/* Pseudo write pointer value for conventional zone */
+#define WP_CONVENTIONAL ((u64)-2)
 
 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx,
 			     void *data)
@@ -746,3 +752,150 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
 
 	return 0;
 }
+
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
+	struct extent_map *em;
+	struct map_lookup *map;
+	struct btrfs_device *device;
+	u64 logical = cache->start;
+	u64 length = cache->length;
+	u64 physical = 0;
+	int ret;
+	int i;
+	unsigned int nofs_flag;
+	u64 *alloc_offsets = NULL;
+	u32 num_sequential = 0, num_conventional = 0;
+
+	if (!btrfs_fs_incompat(fs_info, ZONED))
+		return 0;
+
+	/* Sanity check */
+	if (!IS_ALIGNED(length, fs_info->zone_size)) {
+		btrfs_err(fs_info, "unaligned block group at %llu + %llu",
+			  logical, length);
+		return -EIO;
+	}
+
+	/* Get the chunk mapping */
+	read_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, logical, length);
+	read_unlock(&em_tree->lock);
+
+	if (!em)
+		return -EINVAL;
+
+	map = em->map_lookup;
+
+	/*
+	 * Get the zone type: if the group is mapped to a non-sequential zone,
+	 * there is no need for the allocation offset (fit allocation is OK).
+	 */
+	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets),
+				GFP_NOFS);
+	if (!alloc_offsets) {
+		free_extent_map(em);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < map->num_stripes; i++) {
+		bool is_sequential;
+		struct blk_zone zone;
+
+		device = map->stripes[i].dev;
+		physical = map->stripes[i].physical;
+
+		if (device->bdev == NULL) {
+			alloc_offsets[i] = WP_MISSING_DEV;
+			continue;
+		}
+
+		is_sequential = btrfs_dev_is_sequential(device, physical);
+		if (is_sequential)
+			num_sequential++;
+		else
+			num_conventional++;
+
+		if (!is_sequential) {
+			alloc_offsets[i] = WP_CONVENTIONAL;
+			continue;
+		}
+
+		/*
+		 * This zone will be used for allocation, so mark this
+		 * zone non-empty.
+		 */
+		btrfs_dev_clear_zone_empty(device, physical);
+
+		/*
+		 * The group is mapped to a sequential zone. Get the zone write
+		 * pointer to determine the allocation offset within the zone.
+		 */
+		WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
+		nofs_flag = memalloc_nofs_save();
+		ret = btrfs_get_dev_zone(device, physical, &zone);
+		memalloc_nofs_restore(nofs_flag);
+		if (ret == -EIO || ret == -EOPNOTSUPP) {
+			ret = 0;
+			alloc_offsets[i] = WP_MISSING_DEV;
+			continue;
+		} else if (ret) {
+			goto out;
+		}
+
+		switch (zone.cond) {
+		case BLK_ZONE_COND_OFFLINE:
+		case BLK_ZONE_COND_READONLY:
+			btrfs_err(fs_info, "Offline/readonly zone %llu",
+				  physical >> device->zone_info->zone_size_shift);
+			alloc_offsets[i] = WP_MISSING_DEV;
+			break;
+		case BLK_ZONE_COND_EMPTY:
+			alloc_offsets[i] = 0;
+			break;
+		case BLK_ZONE_COND_FULL:
+			alloc_offsets[i] = fs_info->zone_size;
+			break;
+		default:
+			/* Partially used zone */
+			alloc_offsets[i] =
+				((zone.wp - zone.start) << SECTOR_SHIFT);
+			break;
+		}
+	}
+
+	if (num_conventional > 0) {
+		/*
+		 * Since conventional zones do not have a write pointer, we
+		 * cannot determine alloc_offset from it
+		 */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+	case 0: /* single */
+		cache->alloc_offset = alloc_offsets[0];
+		break;
+	case BTRFS_BLOCK_GROUP_DUP:
+	case BTRFS_BLOCK_GROUP_RAID1:
+	case BTRFS_BLOCK_GROUP_RAID0:
+	case BTRFS_BLOCK_GROUP_RAID10:
+	case BTRFS_BLOCK_GROUP_RAID5:
+	case BTRFS_BLOCK_GROUP_RAID6:
+		/* non-SINGLE profiles are not supported yet */
+	default:
+		btrfs_err(fs_info, "Unsupported profile on ZONED %s",
+			  btrfs_bg_type_to_raid_name(map->type));
+		ret = -EINVAL;
+		goto out;
+	}
+
+out:
+	kfree(alloc_offsets);
+	free_extent_map(em);
+
+	return ret;
+}
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 0be58861d922..1fd7cad19e18 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -45,6 +45,7 @@ u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
 int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
 			    u64 length, u64 *bytes);
 int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size);
+int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 				     struct blk_zone *zone)
@@ -107,6 +108,11 @@ static inline int btrfs_ensure_empty_zones(struct btrfs_device *device,
 {
 	return 0;
 }
+static inline int btrfs_load_block_group_zone_info(
+	struct btrfs_block_group *cache)
+{
+	return 0;
+}
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
-- 
2.27.0


Thread overview: 51+ messages
2020-09-11 12:32 [PATCH v7 00/39] btrfs: zoned block device support Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 01/39] btrfs: introduce ZONED feature flag Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 02/39] btrfs: Get zone information of zoned block devices Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 03/39] btrfs: Check and enable ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 04/39] btrfs: introduce max_zone_append_size Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 05/39] btrfs: disallow space_cache in ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 06/39] btrfs: disallow NODATACOW " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 07/39] btrfs: disable fallocate " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 08/39] btrfs: disallow mixed-bg " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 09/39] btrfs: disallow inode_cache " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 10/39] btrfs: implement log-structured superblock for " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 11/39] btrfs: implement zoned chunk allocator Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 12/39] btrfs: verify device extent is aligned to zone Naohiro Aota
2020-09-11 12:32 ` Naohiro Aota [this message]
2020-09-11 12:32 ` [PATCH v7 14/39] btrfs: emulate write pointer for conventional zones Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 15/39] btrfs: track unusable bytes for zones Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 16/39] btrfs: do sequential extent allocation in ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 17/39] btrfs: reset zones of unused block groups Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 18/39] btrfs: redirty released extent buffers in ZONED mode Naohiro Aota
2020-09-14 11:48   ` Johannes Thumshirn
2020-09-11 12:32 ` [PATCH v7 19/39] btrfs: limit bio size under max_zone_append_size Naohiro Aota
2020-09-11 14:17   ` Christoph Hellwig
2020-09-12  4:14     ` Naohiro Aota
2020-09-12  5:30       ` Christoph Hellwig
2020-09-17  5:32         ` Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 20/39] btrfs: limit ordered extent size to max_zone_append_size Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 21/39] btrfs: extend btrfs_rmap_block for specifying a device Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 22/39] btrfs: use ZONE_APPEND write for ZONED btrfs Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 23/39] btrfs: handle REQ_OP_ZONE_APPEND as writing Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 24/39] btrfs: enable zone append writing for direct IO Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 25/39] btrfs: introduce dedicated data write path for ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 26/39] btrfs: serialize meta IOs on " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 27/39] btrfs: wait existing extents before truncating Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 28/39] btrfs: avoid async metadata checksum on ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 29/39] btrfs: mark block groups to copy for device-replace Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 30/39] btrfs: implement cloning for ZONED device-replace Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 31/39] btrfs: implement copying " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 32/39] btrfs: support dev-replace in ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 33/39] btrfs: enable relocation " Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 34/39] btrfs: relocate block group to repair IO failure in ZONED Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 35/39] btrfs: split alloc_log_tree() Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 36/39] btrfs: extend zoned allocator to use dedicated tree-log block group Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 37/39] btrfs: serialize log transaction on ZONED mode Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 38/39] btrfs: reorder log node allocation Naohiro Aota
2020-09-11 12:32 ` [PATCH v7 39/39] btrfs: enable to mount ZONED incompat flag Naohiro Aota
2020-09-15  8:09 ` [PATCH v7 00/39] btrfs: zoned block device support David Sterba
2020-09-16 17:42   ` Johannes Thumshirn
2020-09-16 19:46     ` David Sterba
2020-09-16 19:50       ` Johannes Thumshirn
2020-09-17  5:40     ` Naohiro Aota
2020-09-17  7:14       ` Johannes Thumshirn
