All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/5] btrfs: code cleanup
@ 2019-12-10  7:13 Sebastian
  2019-12-10  7:13 ` [PATCH 1/5] fs_btrfs_sysfs: " Sebastian
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Sebastian @ 2019-12-10  7:13 UTC (permalink / raw)
  To: dsterba; +Cc: josef, clm, linux-btrfs, linux-kernel, Sebastian Scherbel

From: Sebastian Scherbel <sebastian.scherbel@fau.de>

This patch series changes several instances in btrfs where the coding style
is not in line with the Linux kernel guidelines to improve readability.

Sebastian Scherbel (5):
  fs_btrfs_sysfs: code cleanup
  fs_btrfs_struct-funcs: code cleanup
  fs_btrfs_ref-verify: code cleanup
  fs_btrfs_qgroup: code cleanup
  fs_btrfs_block-group: code cleanup

 fs/btrfs/block-group.c  | 21 ++++++++++------
 fs/btrfs/block-group.h  |  8 +++---
 fs/btrfs/qgroup.c       | 54 +++++++++++++++++++++++------------------
 fs/btrfs/qgroup.h       | 12 ++++-----
 fs/btrfs/ref-verify.c   |  6 ++---
 fs/btrfs/struct-funcs.c |  5 ++--
 fs/btrfs/sysfs.c        | 33 +++++++++++++++++++------
 fs/btrfs/sysfs.h        |  5 ++--
 8 files changed, 89 insertions(+), 55 deletions(-)

-- 
2.20.1


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 1/5] fs_btrfs_sysfs: code cleanup
  2019-12-10  7:13 [PATCH 0/5] btrfs: code cleanup Sebastian
@ 2019-12-10  7:13 ` Sebastian
  2019-12-10  7:13 ` [PATCH 2/5] fs_btrfs_struct-funcs: " Sebastian
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Sebastian @ 2019-12-10  7:13 UTC (permalink / raw)
  To: dsterba
  Cc: josef, clm, linux-btrfs, linux-kernel, Sebastian Scherbel, Ole Wiedemann

From: Sebastian Scherbel <sebastian.scherbel@fau.de>

This patch changes several instances in sysfs where the coding style is not
in line with the Linux kernel guidelines to improve readability.

1. Symbolic permissions like 'S_IRUGO' are not preferred, they are
converted into their octal representation
2. lines with more than 80 characters are broken into sensible chunks,
unless exceeding the limit significantly increases readability
3. missing blank lines after declarations are added
4. tabs are used for indentations where possible

Signed-off-by: Sebastian Scherbel <sebastian.scherbel@fau.de>
Co-developed-by: Ole Wiedemann <ole.wiedemann@fau.de>
Signed-off-by: Ole Wiedemann <ole.wiedemann@fau.de>
---
 fs/btrfs/sysfs.c | 33 ++++++++++++++++++++++++++-------
 fs/btrfs/sysfs.h |  5 +++--
 2 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 5ebbe8a5ee76..30221dfb7f5c 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -51,7 +51,7 @@ struct raid_kobject {
 
 #define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit)  \
 static struct btrfs_feature_attr btrfs_attr_features_##_name = {	     \
-	.kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO,			     \
+	.kobj_attr = __INIT_KOBJ_ATTR(_name, 0444,			     \
 				      btrfs_feature_attr_show,		     \
 				      btrfs_feature_attr_store),	     \
 	.feature_set	= _feature_set,					     \
@@ -90,6 +90,7 @@ static u64 get_features(struct btrfs_fs_info *fs_info,
 			enum btrfs_feature_set set)
 {
 	struct btrfs_super_block *disk_super = fs_info->super_copy;
+
 	if (set == FEAT_COMPAT)
 		return btrfs_super_compat_flags(disk_super);
 	else if (set == FEAT_COMPAT_RO)
@@ -102,6 +103,7 @@ static void set_features(struct btrfs_fs_info *fs_info,
 			 enum btrfs_feature_set set, u64 features)
 {
 	struct btrfs_super_block *disk_super = fs_info->super_copy;
+
 	if (set == FEAT_COMPAT)
 		btrfs_set_super_compat_flags(disk_super, features);
 	else if (set == FEAT_COMPAT_RO)
@@ -114,6 +116,7 @@ static int can_modify_feature(struct btrfs_feature_attr *fa)
 {
 	int val = 0;
 	u64 set, clear;
+
 	switch (fa->feature_set) {
 	case FEAT_COMPAT:
 		set = BTRFS_FEATURE_COMPAT_SAFE_SET;
@@ -147,8 +150,10 @@ static ssize_t btrfs_feature_attr_show(struct kobject *kobj,
 	int val = 0;
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 	struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a);
+
 	if (fs_info) {
 		u64 features = get_features(fs_info, fa->feature_set);
+
 		if (features & fa->feature_bit)
 			val = 1;
 	} else
@@ -239,7 +244,7 @@ static umode_t btrfs_feature_visible(struct kobject *kobj,
 		features = get_features(fs_info, fa->feature_set);
 
 		if (can_modify_feature(fa))
-			mode |= S_IWUSR;
+			mode |= 0200;
 		else if (!(features & fa->feature_bit))
 			mode = 0;
 	}
@@ -358,6 +363,7 @@ static const struct attribute_group btrfs_debug_feature_attr_group = {
 static ssize_t btrfs_show_u64(u64 *value_ptr, spinlock_t *lock, char *buf)
 {
 	u64 val;
+
 	if (lock)
 		spin_lock(lock);
 	val = *value_ptr;
@@ -371,6 +377,7 @@ static ssize_t global_rsv_size_show(struct kobject *kobj,
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
+
 	return btrfs_show_u64(&block_rsv->size, &block_rsv->lock, buf);
 }
 BTRFS_ATTR(allocation, global_rsv_size, global_rsv_size_show);
@@ -380,6 +387,7 @@ static ssize_t global_rsv_reserved_show(struct kobject *kobj,
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
+
 	return btrfs_show_u64(&block_rsv->reserved, &block_rsv->lock, buf);
 }
 BTRFS_ATTR(allocation, global_rsv_reserved, global_rsv_reserved_show);
@@ -436,6 +444,7 @@ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj,	\
 					     char *buf)			\
 {									\
 	struct btrfs_space_info *sinfo = to_space_info(kobj);		\
+									\
 	return btrfs_show_u64(&sinfo->field, &sinfo->lock, buf);	\
 }									\
 BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
@@ -446,6 +455,7 @@ static ssize_t btrfs_space_info_show_total_bytes_pinned(struct kobject *kobj,
 {
 	struct btrfs_space_info *sinfo = to_space_info(kobj);
 	s64 val = percpu_counter_sum(&sinfo->total_bytes_pinned);
+
 	return snprintf(buf, PAGE_SIZE, "%lld\n", val);
 }
 
@@ -479,6 +489,7 @@ ATTRIBUTE_GROUPS(space_info);
 static void space_info_release(struct kobject *kobj)
 {
 	struct btrfs_space_info *sinfo = to_space_info(kobj);
+
 	percpu_counter_destroy(&sinfo->total_bytes_pinned);
 	kfree(sinfo);
 }
@@ -682,8 +693,10 @@ static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj)
 
 #define NUM_FEATURE_BITS 64
 #define BTRFS_FEATURE_NAME_MAX 13
-static char btrfs_unknown_feature_names[FEAT_MAX][NUM_FEATURE_BITS][BTRFS_FEATURE_NAME_MAX];
-static struct btrfs_feature_attr btrfs_feature_attrs[FEAT_MAX][NUM_FEATURE_BITS];
+static char btrfs_unknown_feature_names[FEAT_MAX][NUM_FEATURE_BITS]
+				       [BTRFS_FEATURE_NAME_MAX];
+static struct btrfs_feature_attr btrfs_feature_attrs[FEAT_MAX]
+						    [NUM_FEATURE_BITS];
 
 static const u64 supported_feature_masks[FEAT_MAX] = {
 	[FEAT_COMPAT]    = BTRFS_FEATURE_COMPAT_SUPP,
@@ -703,6 +716,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
 			.attrs = attrs,
 		};
 		u64 features = get_features(fs_info, set);
+
 		features &= ~supported_feature_masks[set];
 
 		if (!features)
@@ -719,6 +733,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
 			attrs[0] = &fa->kobj_attr.attr;
 			if (add) {
 				int ret;
+
 				ret = sysfs_merge_group(&fs_info->fs_devices->fsid_kobj,
 							&agroup);
 				if (ret)
@@ -772,7 +787,8 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info)
 		kobject_put(fs_info->space_info_kobj);
 	}
 	addrm_unknown_feature_attrs(fs_info, false);
-	sysfs_remove_group(&fs_info->fs_devices->fsid_kobj, &btrfs_feature_attr_group);
+	sysfs_remove_group(&fs_info->fs_devices->fsid_kobj,
+			   &btrfs_feature_attr_group);
 	sysfs_remove_files(&fs_info->fs_devices->fsid_kobj, btrfs_attrs);
 	btrfs_sysfs_rm_device_link(fs_info->fs_devices, NULL);
 }
@@ -831,6 +847,7 @@ static void init_feature_attrs(void)
 		struct btrfs_feature_attr *sfa;
 		struct attribute *a = btrfs_supported_feature_attrs[i];
 		int bit;
+
 		sfa = attr_to_btrfs_feature_attr(a);
 		bit = ilog2(sfa->feature_bit);
 		fa = &btrfs_feature_attrs[sfa->feature_set][bit];
@@ -841,6 +858,7 @@ static void init_feature_attrs(void)
 	for (set = 0; set < FEAT_MAX; set++) {
 		for (i = 0; i < ARRAY_SIZE(btrfs_feature_attrs[set]); i++) {
 			char *name = btrfs_unknown_feature_names[set][i];
+
 			fa = &btrfs_feature_attrs[set][i];
 
 			if (fa->kobj_attr.attr.name)
@@ -850,7 +868,7 @@ static void init_feature_attrs(void)
 				 btrfs_feature_set_names[set], i);
 
 			fa->kobj_attr.attr.name = name;
-			fa->kobj_attr.attr.mode = S_IRUGO;
+			fa->kobj_attr.attr.mode = 0444;
 			fa->feature_set = set;
 			fa->feature_bit = 1ULL << i;
 		}
@@ -1189,7 +1207,8 @@ int __init btrfs_init_sysfs(void)
 		goto out_remove_group;
 
 #ifdef CONFIG_BTRFS_DEBUG
-	ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group);
+	ret = sysfs_create_group(&btrfs_kset->kobj,
+				 &btrfs_debug_feature_attr_group);
 	if (ret)
 		goto out2;
 #endif
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index e10c3adfc30f..7c2222d7046e 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -17,7 +17,7 @@ const char * const btrfs_feature_set_name(enum btrfs_feature_set set);
 int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
 		struct btrfs_device *one_device);
 int btrfs_sysfs_rm_device_link(struct btrfs_fs_devices *fs_devices,
-                struct btrfs_device *one_device);
+		struct btrfs_device *one_device);
 int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
 				struct kobject *parent);
 int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs);
@@ -26,7 +26,8 @@ void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices,
 				    const u8 *fsid);
 void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
 		u64 bit, enum btrfs_feature_set set);
-void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action);
+void btrfs_kobject_uevent(struct block_device *bdev,
+			  enum kobject_action action);
 
 int __init btrfs_init_sysfs(void);
 void __cold btrfs_exit_sysfs(void);
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 2/5] fs_btrfs_struct-funcs: code cleanup
  2019-12-10  7:13 [PATCH 0/5] btrfs: code cleanup Sebastian
  2019-12-10  7:13 ` [PATCH 1/5] fs_btrfs_sysfs: " Sebastian
@ 2019-12-10  7:13 ` Sebastian
  2019-12-10  7:13 ` [PATCH 3/5] fs_btrfs_ref-verify: " Sebastian
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Sebastian @ 2019-12-10  7:13 UTC (permalink / raw)
  To: dsterba
  Cc: josef, clm, linux-btrfs, linux-kernel, Sebastian Scherbel, Ole Wiedemann

From: Sebastian Scherbel <sebastian.scherbel@fau.de>

This patch changes several instances in struct-funcs where the coding style
is not in line with the Linux kernel guidelines to improve readability.

1. missing blank lines after declarations are added
2. tabs are used for indentations where possible

Signed-off-by: Sebastian Scherbel <sebastian.scherbel@fau.de>
Co-developed-by: Ole Wiedemann <ole.wiedemann@fau.de>
Signed-off-by: Ole Wiedemann <ole.wiedemann@fau.de>
---
 fs/btrfs/struct-funcs.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index 73f7987143df..4f63e69c5387 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -9,12 +9,12 @@
 
 static inline u8 get_unaligned_le8(const void *p)
 {
-       return *(u8 *)p;
+	return *(u8 *)p;
 }
 
 static inline void put_unaligned_le8(u8 val, void *p)
 {
-       *(u8 *)p = val;
+	*(u8 *)p = val;
 }
 
 /*
@@ -173,6 +173,7 @@ void btrfs_node_key(const struct extent_buffer *eb,
 		    struct btrfs_disk_key *disk_key, int nr)
 {
 	unsigned long ptr = btrfs_node_key_ptr_offset(nr);
+
 	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
 		       struct btrfs_key_ptr, key, disk_key);
 }
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 3/5] fs_btrfs_ref-verify: code cleanup
  2019-12-10  7:13 [PATCH 0/5] btrfs: code cleanup Sebastian
  2019-12-10  7:13 ` [PATCH 1/5] fs_btrfs_sysfs: " Sebastian
  2019-12-10  7:13 ` [PATCH 2/5] fs_btrfs_struct-funcs: " Sebastian
@ 2019-12-10  7:13 ` Sebastian
  2019-12-10  7:13 ` [PATCH 4/5] fs_btrfs_qgroup: " Sebastian
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Sebastian @ 2019-12-10  7:13 UTC (permalink / raw)
  To: dsterba
  Cc: josef, clm, linux-btrfs, linux-kernel, Sebastian Scherbel, Ole Wiedemann

From: Sebastian Scherbel <sebastian.scherbel@fau.de>

This patch changes several instances in ref-verify where the coding style
is not in line with the Linux kernel guidelines to improve readability.

1. inline keyword moved between storage class and type
2. missing space before the open parenthesis added

Signed-off-by: Sebastian Scherbel <sebastian.scherbel@fau.de>
Co-developed-by: Ole Wiedemann <ole.wiedemann@fau.de>
Signed-off-by: Ole Wiedemann <ole.wiedemann@fau.de>
---
 fs/btrfs/ref-verify.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index b57f3618e58e..be735e774d3a 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -218,11 +218,11 @@ static void __print_stack_trace(struct btrfs_fs_info *fs_info,
 	stack_trace_print(ra->trace, ra->trace_len, 2);
 }
 #else
-static void inline __save_stack_trace(struct ref_action *ra)
+static inline void __save_stack_trace(struct ref_action *ra)
 {
 }
 
-static void inline __print_stack_trace(struct btrfs_fs_info *fs_info,
+static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
 				       struct ref_action *ra)
 {
 	btrfs_err(fs_info, "  ref-verify: no stacktrace support");
@@ -242,7 +242,7 @@ static void free_block_entry(struct block_entry *be)
 		kfree(re);
 	}
 
-	while((n = rb_first(&be->refs))) {
+	while ((n = rb_first(&be->refs))) {
 		ref = rb_entry(n, struct ref_entry, node);
 		rb_erase(&ref->node, &be->refs);
 		kfree(ref);
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 4/5] fs_btrfs_qgroup: code cleanup
  2019-12-10  7:13 [PATCH 0/5] btrfs: code cleanup Sebastian
                   ` (2 preceding siblings ...)
  2019-12-10  7:13 ` [PATCH 3/5] fs_btrfs_ref-verify: " Sebastian
@ 2019-12-10  7:13 ` Sebastian
  2019-12-10  7:13 ` [PATCH 5/5] fs_btrfs_block-group: " Sebastian
  2019-12-10 12:19 ` [PATCH 0/5] btrfs: " David Sterba
  5 siblings, 0 replies; 7+ messages in thread
From: Sebastian @ 2019-12-10  7:13 UTC (permalink / raw)
  To: dsterba
  Cc: josef, clm, linux-btrfs, linux-kernel, Sebastian Scherbel, Ole Wiedemann

From: Sebastian Scherbel <sebastian.scherbel@fau.de>

This patch changes several instances in qgroup where the coding style
is not in line with the Linux kernel guidelines to improve readability.

1. block comment alignment fixed
2. unnecessary braces removed
3. consistent spacing around '-' added
4. trailing whitespace removed
5. lines with more than 80 characters are broken into sensible chunks,
unless exceeding the limit significantly increases readability
6. missing blank lines after declarations are added
7. tabs are used for indentations where possible

Signed-off-by: Sebastian Scherbel <sebastian.scherbel@fau.de>
Co-developed-by: Ole Wiedemann <ole.wiedemann@fau.de>
Signed-off-by: Ole Wiedemann <ole.wiedemann@fau.de>
---
 fs/btrfs/qgroup.c | 54 +++++++++++++++++++++++++++--------------------
 fs/btrfs/qgroup.h | 12 +++++------
 2 files changed, 37 insertions(+), 29 deletions(-)

diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 93aeb2e539a4..da3fbdd756ad 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -156,7 +156,7 @@ static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
 	return (u64)(uintptr_t)qg;
 }
 
-static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
+static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
 {
 	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
 }
@@ -1029,9 +1029,9 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
 
 	ret = qgroup_rescan_init(fs_info, 0, 1);
 	if (!ret) {
-	        qgroup_rescan_zero_tracking(fs_info);
-	        btrfs_queue_work(fs_info->qgroup_rescan_workers,
-	                         &fs_info->qgroup_rescan_work);
+		qgroup_rescan_zero_tracking(fs_info);
+		btrfs_queue_work(fs_info->qgroup_rescan_workers,
+				 &fs_info->qgroup_rescan_work);
 	}
 
 out_free_path:
@@ -1191,7 +1191,8 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
 		/* Add any parents of the parents */
 		list_for_each_entry(glist, &qgroup->groups, next_group) {
 			ret = ulist_add(tmp, glist->group->qgroupid,
-					qgroup_to_aux(glist->group), GFP_ATOMIC);
+					qgroup_to_aux(glist->group),
+					GFP_ATOMIC);
 			if (ret < 0)
 				goto out;
 		}
@@ -1781,7 +1782,7 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
  * This function can free us from keeping two paths, thus later we only need
  * to care about how to iterate all new tree blocks in reloc tree.
  */
-static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
+static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
 				    struct extent_buffer *src_eb,
 				    struct btrfs_path *dst_path,
 				    int dst_level, int root_level,
@@ -1876,8 +1877,9 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
 	 * Now both @dst_path and @src_path have been populated, record the tree
 	 * blocks for qgroup accounting.
 	 */
-	ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
-			nodesize, GFP_NOFS);
+	ret = btrfs_qgroup_trace_extent(trans,
+					src_path->nodes[dst_level]->start,
+					nodesize, GFP_NOFS);
 	if (ret < 0)
 		goto out;
 	ret = btrfs_qgroup_trace_extent(trans,
@@ -1920,7 +1922,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
  * While during search, old tree blocks OO(c) will be skipped as tree block swap
  * won't affect OO(c).
  */
-static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
+static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
 					   struct extent_buffer *src_eb,
 					   struct btrfs_path *dst_path,
 					   int cur_level, int root_level,
@@ -1963,7 +1965,7 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
 		/*
 		 * We need to get child blockptr/gen from parent before we can
 		 * read it.
-		  */
+		 */
 		eb = dst_path->nodes[cur_level + 1];
 		parent_slot = dst_path->slots[cur_level + 1];
 		child_bytenr = btrfs_node_blockptr(eb, parent_slot);
@@ -2142,7 +2144,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
 			/*
 			 * We need to get child blockptr/gen from parent before
 			 * we can read it.
-			  */
+			 */
 			eb = path->nodes[level + 1];
 			parent_slot = path->slots[level + 1];
 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
@@ -2228,7 +2230,8 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
 				GFP_ATOMIC);
 		if (ret < 0)
 			return ret;
-		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
+		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg),
+				GFP_ATOMIC);
 		if (ret < 0)
 			return ret;
 		ULIST_ITER_INIT(&tmp_uiter);
@@ -2406,7 +2409,8 @@ static int maybe_fs_roots(struct ulist *roots)
 	/*
 	 * If it contains fs tree roots, then it must belong to fs/subvol
 	 * trees.
-	 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
+	 * If it contains a non-fs tree, it won't be shared with fs/subvol
+	 * trees.
 	 */
 	return is_fstree(unode->val);
 }
@@ -2587,6 +2591,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
 	spin_lock(&fs_info->qgroup_lock);
 	while (!list_empty(&fs_info->dirty_qgroups)) {
 		struct btrfs_qgroup *qgroup;
+
 		qgroup = list_first_entry(&fs_info->dirty_qgroups,
 					  struct btrfs_qgroup, dirty);
 		list_del_init(&qgroup->dirty);
@@ -2926,7 +2931,8 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
 		list_for_each_entry(glist, &qg->groups, next_group) {
 			ret = ulist_add(fs_info->qgroup_ulist,
 					glist->group->qgroupid,
-					qgroup_to_aux(glist->group), GFP_ATOMIC);
+					qgroup_to_aux(glist->group),
+					GFP_ATOMIC);
 			if (ret < 0)
 				goto out;
 		}
@@ -3012,7 +3018,8 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
 		list_for_each_entry(glist, &qg->groups, next_group) {
 			ret = ulist_add(fs_info->qgroup_ulist,
 					glist->group->qgroupid,
-					qgroup_to_aux(glist->group), GFP_ATOMIC);
+					qgroup_to_aux(glist->group),
+					GFP_ATOMIC);
 			if (ret < 0)
 				goto out;
 		}
@@ -3151,11 +3158,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 			err = PTR_ERR(trans);
 			break;
 		}
-		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
+		if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 			err = -EINTR;
-		} else {
+		else
 			err = qgroup_rescan_leaf(trans, path);
-		}
 		if (err > 0)
 			btrfs_commit_transaction(trans);
 		else
@@ -3419,7 +3425,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
 	/* Record already reserved space */
 	orig_reserved = reserved->bytes_changed;
 	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
-			start + len -1, EXTENT_QGROUP_RESERVED, reserved);
+			start + len - 1, EXTENT_QGROUP_RESERVED, reserved);
 
 	/* Newly reserved space */
 	to_reserve = reserved->bytes_changed - orig_reserved;
@@ -3438,7 +3444,8 @@ int btrfs_qgroup_reserve_data(struct inode *inode,
 	ULIST_ITER_INIT(&uiter);
 	while ((unode = ulist_next(&reserved->range_changed, &uiter)))
 		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
-				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
+				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0,
+				 NULL);
 	/* Also free data bytes of already reserved one */
 	btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
 				  orig_reserved, BTRFS_QGROUP_RSV_DATA);
@@ -3518,8 +3525,8 @@ static int __btrfs_qgroup_release_data(struct inode *inode,
 	if (free && reserved)
 		return qgroup_free_reserved_data(inode, reserved, start, len);
 	extent_changeset_init(&changeset);
-	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, 
-			start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
+	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
+			start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
 	if (ret < 0)
 		goto out;
 
@@ -3715,7 +3722,8 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
 		list_for_each_entry(glist, &qg->groups, next_group) {
 			ret = ulist_add(fs_info->qgroup_ulist,
 					glist->group->qgroupid,
-					qgroup_to_aux(glist->group), GFP_ATOMIC);
+					qgroup_to_aux(glist->group),
+					GFP_ATOMIC);
 			if (ret < 0)
 				goto out;
 		}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 236f12224d52..8d70bf3711a2 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -146,12 +146,12 @@ struct btrfs_qgroup_swapped_block {
  *	space reserved for data
  *
  * META_PERTRANS:
- * 	Space reserved for metadata (per-transaction)
- * 	Due to the fact that qgroup data is only updated at transaction commit
- * 	time, reserved space for metadata must be kept until transaction
- * 	commits.
- * 	Any metadata reserved that are used in btrfs_start_transaction() should
- * 	be of this type.
+ *	Space reserved for metadata (per-transaction)
+ *	Due to the fact that qgroup data is only updated at transaction commit
+ *	time, reserved space for metadata must be kept until transaction
+ *	commits.
+ *	Any metadata reserved that are used in btrfs_start_transaction() should
+ *	be of this type.
  *
  * META_PREALLOC:
  *	There are cases where metadata space is reserved before starting
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 5/5] fs_btrfs_block-group: code cleanup
  2019-12-10  7:13 [PATCH 0/5] btrfs: code cleanup Sebastian
                   ` (3 preceding siblings ...)
  2019-12-10  7:13 ` [PATCH 4/5] fs_btrfs_qgroup: " Sebastian
@ 2019-12-10  7:13 ` Sebastian
  2019-12-10 12:19 ` [PATCH 0/5] btrfs: " David Sterba
  5 siblings, 0 replies; 7+ messages in thread
From: Sebastian @ 2019-12-10  7:13 UTC (permalink / raw)
  To: dsterba
  Cc: josef, clm, linux-btrfs, linux-kernel, Sebastian Scherbel, Ole Wiedemann

From: Sebastian Scherbel <sebastian.scherbel@fau.de>

This patch changes several instances in block-group where the coding style
is not in line with the Linux kernel guidelines to improve readability.

1. bare use of 'unsigned' replaced by 'unsigned int'
2. code indentation fixed
3. lines with more than 80 characters are broken into sensible chunks,
unless exceeding the limit significantly increases readability
4. tabs are used for indentations where possible

Signed-off-by: Sebastian Scherbel <sebastian.scherbel@fau.de>
Co-developed-by: Ole Wiedemann <ole.wiedemann@fau.de>
Signed-off-by: Ole Wiedemann <ole.wiedemann@fau.de>
---
 fs/btrfs/block-group.c | 21 +++++++++++++--------
 fs/btrfs/block-group.h |  8 ++++----
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 6934a5b8708f..22bc97515e96 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -97,7 +97,7 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
 
 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
 {
-	unsigned seq;
+	unsigned int seq;
 	u64 flags;
 
 	do {
@@ -259,7 +259,8 @@ struct btrfs_block_group *btrfs_next_block_group(
 
 		spin_unlock(&fs_info->block_group_cache_lock);
 		btrfs_put_block_group(cache);
-		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
+		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
+		return cache;
 	}
 	node = rb_next(&cache->cache_node);
 	btrfs_put_block_group(cache);
@@ -447,7 +448,8 @@ static void fragment_free_space(struct btrfs_block_group *block_group)
  * used yet since their free space will be released as soon as the transaction
  * commits.
  */
-u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
+u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start,
+		       u64 end)
 {
 	struct btrfs_fs_info *info = block_group->fs_info;
 	u64 extent_start, extent_end, size, total_added = 0;
@@ -670,7 +672,8 @@ static noinline void caching_thread(struct btrfs_work *work)
 	btrfs_put_block_group(block_group);
 }
 
-int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
+int btrfs_cache_block_group(struct btrfs_block_group *cache,
+			    int load_cache_only)
 {
 	DEFINE_WAIT(wait);
 	struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -1696,7 +1699,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
 
 	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
 
-	cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
+	cache = btrfs_create_block_group_cache(info, key->objectid,
+					       key->offset);
 	if (!cache)
 		return -ENOMEM;
 
@@ -2023,8 +2027,8 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
  *
  * @cache:		the destination block group
  * @do_chunk_alloc:	whether need to do chunk pre-allocation, this is to
- * 			ensure we still have some free space after marking this
- * 			block group RO.
+ *			ensure we still have some free space after marking this
+ *			block group RO.
  */
 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
 			     bool do_chunk_alloc)
@@ -2082,7 +2086,8 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
 		goto unlock_out;
 	if (!ret)
 		goto out;
-	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
+	alloc_flags = btrfs_get_alloc_profile(fs_info,
+					      cache->space_info->flags);
 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 9b409676c4b2..d4e9d2d88542 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -139,9 +139,9 @@ struct btrfs_block_group {
 	 * Incremented while holding the spinlock *lock* by a task checking if
 	 * it can perform a nocow write (incremented if the value for the *ro*
 	 * field is 0). Decremented by such tasks once they create an ordered
-	 * extent or before that if some error happens before reaching that step.
-	 * This is to prevent races between block group relocation and nocow
-	 * writes through direct IO.
+	 * extent or before that if some error happens before reaching that
+	 * step. This is to prevent races between block group relocation and
+	 * nocow writes through direct IO.
 	 */
 	atomic_t nocow_writers;
 
@@ -186,7 +186,7 @@ bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
-				           u64 num_bytes);
+					   u64 num_bytes);
 int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
 int btrfs_cache_block_group(struct btrfs_block_group *cache,
 			    int load_cache_only);
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH 0/5] btrfs: code cleanup
  2019-12-10  7:13 [PATCH 0/5] btrfs: code cleanup Sebastian
                   ` (4 preceding siblings ...)
  2019-12-10  7:13 ` [PATCH 5/5] fs_btrfs_block-group: " Sebastian
@ 2019-12-10 12:19 ` David Sterba
  5 siblings, 0 replies; 7+ messages in thread
From: David Sterba @ 2019-12-10 12:19 UTC (permalink / raw)
  To: Sebastian; +Cc: dsterba, josef, clm, linux-btrfs, linux-kernel

On Tue, Dec 10, 2019 at 08:13:52AM +0100, Sebastian wrote:
> From: Sebastian Scherbel <sebastian.scherbel@fau.de>
> 
> This patch series changes several instances in btrfs where the coding style
> is not in line with the Linux kernel guidelines to improve readability.

Please don't do that. This has happened enough times that we have a FAQ
entry about that and I can recommend reading the whole section, from
which I quote the first part:

https://btrfs.wiki.kernel.org/index.php/Developer%27s_FAQ#How_not_to_start

"It might be tempting to look for coding style violations and send
patches to fix them. This happens from time to time and the community
does not welcome that. [...]"

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2019-12-10 12:20 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-12-10  7:13 [PATCH 0/5] btrfs: code cleanup Sebastian
2019-12-10  7:13 ` [PATCH 1/5] fs_btrfs_sysfs: " Sebastian
2019-12-10  7:13 ` [PATCH 2/5] fs_btrfs_struct-funcs: " Sebastian
2019-12-10  7:13 ` [PATCH 3/5] fs_btrfs_ref-verify: " Sebastian
2019-12-10  7:13 ` [PATCH 4/5] fs_btrfs_qgroup: " Sebastian
2019-12-10  7:13 ` [PATCH 5/5] fs_btrfs_block-group: " Sebastian
2019-12-10 12:19 ` [PATCH 0/5] btrfs: " David Sterba

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.