From: Naohiro Aota <naohiro.aota@wdc.com>
To: David Sterba <dsterba@suse.com>
Cc: linux-btrfs@vger.kernel.org, Naohiro Aota <naohiro.aota@wdc.com>
Subject: [PATCH 06/12] btrfs-progs: factor out create_chunk()
Date: Tue,  6 Apr 2021 17:05:48 +0900
Message-ID: <d4ff8e93e92baf64b28e02e129ba084ac7032663.1617694997.git.naohiro.aota@wdc.com>
In-Reply-To: <cover.1617694997.git.naohiro.aota@wdc.com>

Factor out create_chunk() from btrfs_alloc_chunk(). The new function takes
the prepared struct alloc_chunk_ctl and the list of devices selected by the
caller, and builds the chunk from them: it allocates the dev extents, fills
in and inserts the chunk item, and adds the mapping to the cache tree. The
device selection and stripe sizing logic stays in btrfs_alloc_chunk().

No functional changes.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 kernel-shared/volumes.c | 217 ++++++++++++++++++++++------------------
 1 file changed, 120 insertions(+), 97 deletions(-)
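
Not part of the patch itself, just a rough sketch of the resulting split (the
real code is in the diff below): btrfs_alloc_chunk() keeps the device
selection and stripe sizing, then hands the filled-in context to
create_chunk(), which reports the chunk start and size back through the
context:

	int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *info, u64 *start,
			      u64 *num_bytes, u64 type)
	{
		struct alloc_chunk_ctl ctl = { .type = type };
		struct list_head private_devs;
		int ret;

		/* ... size the stripes and move usable devices to private_devs ... */

		ret = create_chunk(trans, info, &ctl, &private_devs);
		*start = ctl.start;
		*num_bytes = ctl.num_bytes;
		return ret;
	}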

diff --git a/kernel-shared/volumes.c b/kernel-shared/volumes.c
index 95b42eab846d..a409dd3d0366 100644
--- a/kernel-shared/volumes.c
+++ b/kernel-shared/volumes.c
@@ -149,6 +149,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
 };
 
 struct alloc_chunk_ctl {
+	u64 start;
 	u64 type;
 	int num_stripes;
 	int max_stripes;
@@ -156,6 +157,7 @@ struct alloc_chunk_ctl {
 	int sub_stripes;
 	u64 calc_size;
 	u64 min_stripe_size;
+	u64 num_bytes;
 	u64 max_chunk_size;
 	int stripe_len;
 	int total_devs;
@@ -1118,88 +1120,23 @@ static int decide_stripe_size(struct btrfs_fs_info *info,
 	}
 }
 
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-		      struct btrfs_fs_info *info, u64 *start,
-		      u64 *num_bytes, u64 type)
+static int create_chunk(struct btrfs_trans_handle *trans,
+			struct btrfs_fs_info *info, struct alloc_chunk_ctl *ctl,
+			struct list_head *private_devs)
 {
-	u64 dev_offset;
 	struct btrfs_root *extent_root = info->extent_root;
 	struct btrfs_root *chunk_root = info->chunk_root;
 	struct btrfs_stripe *stripes;
 	struct btrfs_device *device = NULL;
 	struct btrfs_chunk *chunk;
-	struct list_head private_devs;
 	struct list_head *dev_list = &info->fs_devices->devices;
 	struct list_head *cur;
 	struct map_lookup *map;
-	u64 min_free;
-	u64 avail = 0;
-	u64 max_avail = 0;
-	struct alloc_chunk_ctl ctl;
-	int looped = 0;
 	int ret;
 	int index;
 	struct btrfs_key key;
 	u64 offset;
 
-	if (list_empty(dev_list)) {
-		return -ENOSPC;
-	}
-
-	ctl.type = type;
-	init_alloc_chunk_ctl(info, &ctl);
-	if (ctl.num_stripes < ctl.min_stripes)
-		return -ENOSPC;
-
-again:
-	ret = decide_stripe_size(info, &ctl);
-	if (ret < 0)
-		return ret;
-
-	INIT_LIST_HEAD(&private_devs);
-	cur = dev_list->next;
-	index = 0;
-
-	if (type & BTRFS_BLOCK_GROUP_DUP)
-		min_free = ctl.calc_size * 2;
-	else
-		min_free = ctl.calc_size;
-
-	/* build a private list of devices we will allocate from */
-	while(index < ctl.num_stripes) {
-		device = list_entry(cur, struct btrfs_device, dev_list);
-		ret = btrfs_device_avail_bytes(trans, device, &avail);
-		if (ret)
-			return ret;
-		cur = cur->next;
-		if (avail >= min_free) {
-			list_move(&device->dev_list, &private_devs);
-			index++;
-			if (type & BTRFS_BLOCK_GROUP_DUP)
-				index++;
-		} else if (avail > max_avail)
-			max_avail = avail;
-		if (cur == dev_list)
-			break;
-	}
-	if (index < ctl.num_stripes) {
-		list_splice(&private_devs, dev_list);
-		if (index >= ctl.min_stripes) {
-			ctl.num_stripes = index;
-			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
-				ctl.num_stripes /= ctl.sub_stripes;
-				ctl.num_stripes *= ctl.sub_stripes;
-			}
-			looped = 1;
-			goto again;
-		}
-		if (!looped && max_avail > 0) {
-			looped = 1;
-			ctl.calc_size = max_avail;
-			goto again;
-		}
-		return -ENOSPC;
-	}
 	ret = find_next_chunk(info, &offset);
 	if (ret)
 		return ret;
@@ -1207,36 +1144,38 @@ again:
 	key.type = BTRFS_CHUNK_ITEM_KEY;
 	key.offset = offset;
 
-	chunk = kmalloc(btrfs_chunk_item_size(ctl.num_stripes), GFP_NOFS);
+	chunk = kmalloc(btrfs_chunk_item_size(ctl->num_stripes), GFP_NOFS);
 	if (!chunk)
 		return -ENOMEM;
 
-	map = kmalloc(btrfs_map_lookup_size(ctl.num_stripes), GFP_NOFS);
+	map = kmalloc(btrfs_map_lookup_size(ctl->num_stripes), GFP_NOFS);
 	if (!map) {
 		kfree(chunk);
 		return -ENOMEM;
 	}
 
 	stripes = &chunk->stripe;
-	*num_bytes = chunk_bytes_by_type(type, ctl.calc_size, &ctl);
+	ctl->num_bytes = chunk_bytes_by_type(ctl->type, ctl->calc_size, ctl);
 	index = 0;
-	while(index < ctl.num_stripes) {
+	while (index < ctl->num_stripes) {
+		u64 dev_offset;
 		struct btrfs_stripe *stripe;
-		BUG_ON(list_empty(&private_devs));
-		cur = private_devs.next;
+
+		BUG_ON(list_empty(private_devs));
+		cur = private_devs->next;
 		device = list_entry(cur, struct btrfs_device, dev_list);
 
 		/* loop over this device again if we're doing a dup group */
-		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
-		    (index == ctl.num_stripes - 1))
+		if (!(ctl->type & BTRFS_BLOCK_GROUP_DUP) ||
+		    (index == ctl->num_stripes - 1))
 			list_move(&device->dev_list, dev_list);
 
 		ret = btrfs_alloc_dev_extent(trans, device, key.offset,
-			     ctl.calc_size, &dev_offset);
+			     ctl->calc_size, &dev_offset);
 		if (ret < 0)
 			goto out_chunk_map;
 
-		device->bytes_used += ctl.calc_size;
+		device->bytes_used += ctl->calc_size;
 		ret = btrfs_update_device(trans, device);
 		if (ret < 0)
 			goto out_chunk_map;
@@ -1249,41 +1188,41 @@ again:
 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
 		index++;
 	}
-	BUG_ON(!list_empty(&private_devs));
+	BUG_ON(!list_empty(private_devs));
 
 	/* key was set above */
-	btrfs_set_stack_chunk_length(chunk, *num_bytes);
+	btrfs_set_stack_chunk_length(chunk, ctl->num_bytes);
 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
-	btrfs_set_stack_chunk_stripe_len(chunk, ctl.stripe_len);
-	btrfs_set_stack_chunk_type(chunk, type);
-	btrfs_set_stack_chunk_num_stripes(chunk, ctl.num_stripes);
-	btrfs_set_stack_chunk_io_align(chunk, ctl.stripe_len);
-	btrfs_set_stack_chunk_io_width(chunk, ctl.stripe_len);
+	btrfs_set_stack_chunk_stripe_len(chunk, ctl->stripe_len);
+	btrfs_set_stack_chunk_type(chunk, ctl->type);
+	btrfs_set_stack_chunk_num_stripes(chunk, ctl->num_stripes);
+	btrfs_set_stack_chunk_io_align(chunk, ctl->stripe_len);
+	btrfs_set_stack_chunk_io_width(chunk, ctl->stripe_len);
 	btrfs_set_stack_chunk_sector_size(chunk, info->sectorsize);
-	btrfs_set_stack_chunk_sub_stripes(chunk, ctl.sub_stripes);
+	btrfs_set_stack_chunk_sub_stripes(chunk, ctl->sub_stripes);
 	map->sector_size = info->sectorsize;
-	map->stripe_len = ctl.stripe_len;
-	map->io_align = ctl.stripe_len;
-	map->io_width = ctl.stripe_len;
-	map->type = type;
-	map->num_stripes = ctl.num_stripes;
-	map->sub_stripes = ctl.sub_stripes;
+	map->stripe_len = ctl->stripe_len;
+	map->io_align = ctl->stripe_len;
+	map->io_width = ctl->stripe_len;
+	map->type = ctl->type;
+	map->num_stripes = ctl->num_stripes;
+	map->sub_stripes = ctl->sub_stripes;
 
 	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
-				btrfs_chunk_item_size(ctl.num_stripes));
+				btrfs_chunk_item_size(ctl->num_stripes));
 	BUG_ON(ret);
-	*start = key.offset;;
+	ctl->start = key.offset;
 
 	map->ce.start = key.offset;
-	map->ce.size = *num_bytes;
+	map->ce.size = ctl->num_bytes;
 
 	ret = insert_cache_extent(&info->mapping_tree.cache_tree, &map->ce);
 	if (ret < 0)
 		goto out_chunk_map;
 
-	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
+	if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM) {
 		ret = btrfs_add_system_chunk(info, &key,
-			    chunk, btrfs_chunk_item_size(ctl.num_stripes));
+			    chunk, btrfs_chunk_item_size(ctl->num_stripes));
 		if (ret < 0)
 			goto out_chunk;
 	}
@@ -1298,6 +1237,90 @@ out_chunk:
 	return ret;
 }
 
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+		      struct btrfs_fs_info *info, u64 *start,
+		      u64 *num_bytes, u64 type)
+{
+	struct btrfs_device *device = NULL;
+	struct list_head private_devs;
+	struct list_head *dev_list = &info->fs_devices->devices;
+	struct list_head *cur;
+	u64 min_free;
+	u64 avail = 0;
+	u64 max_avail = 0;
+	struct alloc_chunk_ctl ctl;
+	int looped = 0;
+	int ret;
+	int index;
+
+	if (list_empty(dev_list))
+		return -ENOSPC;
+
+	ctl.type = type;
+	/* start and num_bytes will be set by create_chunk() */
+	ctl.start = 0;
+	ctl.num_bytes = 0;
+	init_alloc_chunk_ctl(info, &ctl);
+	if (ctl.num_stripes < ctl.min_stripes)
+		return -ENOSPC;
+
+again:
+	ret = decide_stripe_size(info, &ctl);
+	if (ret < 0)
+		return ret;
+
+	INIT_LIST_HEAD(&private_devs);
+	cur = dev_list->next;
+	index = 0;
+
+	if (type & BTRFS_BLOCK_GROUP_DUP)
+		min_free = ctl.calc_size * 2;
+	else
+		min_free = ctl.calc_size;
+
+	/* build a private list of devices we will allocate from */
+	while (index < ctl.num_stripes) {
+		device = list_entry(cur, struct btrfs_device, dev_list);
+		ret = btrfs_device_avail_bytes(trans, device, &avail);
+		if (ret)
+			return ret;
+		cur = cur->next;
+		if (avail >= min_free) {
+			list_move(&device->dev_list, &private_devs);
+			index++;
+			if (type & BTRFS_BLOCK_GROUP_DUP)
+				index++;
+		} else if (avail > max_avail)
+			max_avail = avail;
+		if (cur == dev_list)
+			break;
+	}
+	if (index < ctl.num_stripes) {
+		list_splice(&private_devs, dev_list);
+		if (index >= ctl.min_stripes) {
+			ctl.num_stripes = index;
+			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
+				ctl.num_stripes /= ctl.sub_stripes;
+				ctl.num_stripes *= ctl.sub_stripes;
+			}
+			looped = 1;
+			goto again;
+		}
+		if (!looped && max_avail > 0) {
+			looped = 1;
+			ctl.calc_size = max_avail;
+			goto again;
+		}
+		return -ENOSPC;
+	}
+
+	ret = create_chunk(trans, info, &ctl, &private_devs);
+	*start = ctl.start;
+	*num_bytes = ctl.num_bytes;
+
+	return ret;
+}
+
 /*
  * Alloc a DATA chunk with SINGLE profile.
  *
-- 
2.31.1


Thread overview: 18+ messages
2021-04-06  8:05 [PATCH 00/12] btrfs-progs: refactor and generalize chunk/dev_extent allocation Naohiro Aota
2021-04-06  8:05 ` [PATCH 01/12] btrfs-progs: introduce chunk allocation policy Naohiro Aota
2021-04-06  8:05 ` [PATCH 02/12] btrfs-progs: refactor find_free_dev_extent_start() Naohiro Aota
2021-04-06  8:05 ` [PATCH 03/12] btrfs-progs: convert type of alloc_chunk_ctl::type Naohiro Aota
2021-04-06  8:05 ` [PATCH 04/12] btrfs-progs: consolidate parameter initialization of regular allocator Naohiro Aota
2021-04-06  8:05 ` [PATCH 05/12] btrfs-progs: factor out decide_stripe_size() Naohiro Aota
2021-04-06  8:05 ` [PATCH 06/12] btrfs-progs: factor out create_chunk() Naohiro Aota [this message]
2021-04-06  8:05 ` [PATCH 07/12] btrfs-progs: rewrite btrfs_alloc_data_chunk() using create_chunk() Naohiro Aota
2021-04-06  8:05 ` [PATCH 08/12] btrfs-progs: fix to use half the available space for DUP profile Naohiro Aota
2021-04-06  8:05 ` [PATCH 09/12] btrfs-progs: use round_down for allocation calcs Naohiro Aota
2021-04-06  8:05 ` [PATCH 10/12] btrfs-progs: drop alloc_chunk_ctl::stripe_len Naohiro Aota
2021-04-06  8:05 ` [PATCH 11/12] btrfs-progs: simplify arguments of chunk_bytes_by_type() Naohiro Aota
2021-04-06  8:05 ` [PATCH 12/12] btrfs-progs: rename calc_size to stripe_size Naohiro Aota
2021-04-06  8:28 ` [PATCH 00/12] btrfs-progs: refactor and generalize chunk/dev_extent allocation Johannes Thumshirn
2021-04-06 10:54 ` Su Yue
2021-04-06 13:24   ` Naohiro Aota
2021-04-06 14:40     ` Su Yue
2021-04-29 14:20 ` David Sterba
