* [PATCH 1/5] f2fs: refactor __exchange_data_block for speed up
@ 2016-07-15  0:46 Jaegeuk Kim
  2016-07-15  0:46 ` [PATCH 2/5] f2fs: disable extent_cache for fcollapse/finsert inodes Jaegeuk Kim
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Jaegeuk Kim @ 2016-07-15  0:46 UTC (permalink / raw)
  To: linux-kernel, linux-fsdevel, linux-f2fs-devel; +Cc: Jaegeuk Kim

This refactors __exchange_data_block() to move block addresses in per-dnode
batches instead of exchanging one block at a time, which reduces the elapsed
time of xfstests/generic/017.

Before: 715 s
After:  458 s
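
In outline, the refactored __exchange_data_block() works on a whole range in
three batched steps rather than taking a dnode lookup and lock per block. A
trimmed restatement of the new flow (allocation and error paths omitted; the
helper names are the ones added in the diff below):

	/* 1) snapshot up to 'len' block addresses, one dnode lookup per batch */
	ret = __read_out_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);

	/* 2) move or copy the whole batch into the destination range */
	if (!ret)
		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, len, full);

	/* 3) on failure, put the saved block addresses back */
	if (ret)
		__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);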

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/file.c | 236 +++++++++++++++++++++++++++++++++++++++++----------------
 fs/f2fs/node.c |   1 +
 2 files changed, 171 insertions(+), 66 deletions(-)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index e6b770d..0d4f1dd 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -889,85 +889,189 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	return ret;
 }
 
-static int __exchange_data_block(struct inode *inode, pgoff_t src,
-					pgoff_t dst, bool full)
+static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
+				int *do_replace, pgoff_t off, pgoff_t len)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct dnode_of_data dn;
-	block_t new_addr;
-	bool do_replace = false;
-	int ret;
+	int ret, done, i;
 
+next_dnode:
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
-	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
+	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
 	if (ret && ret != -ENOENT) {
 		return ret;
 	} else if (ret == -ENOENT) {
-		new_addr = NULL_ADDR;
-	} else {
-		new_addr = dn.data_blkaddr;
-		if (!is_checkpointed_data(sbi, new_addr)) {
+		if (dn.max_level == 0)
+			return -ENOENT;
+		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
+		blkaddr += done;
+		do_replace += done;
+		goto next;
+	}
+
+	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+							dn.ofs_in_node, len);
+	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
+		*blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+		if (!is_checkpointed_data(sbi, *blkaddr)) {
+
+			if (test_opt(sbi, LFS)) {
+				f2fs_put_dnode(&dn);
+				return -ENOTSUPP;
+			}
+
 			/* do not invalidate this block address */
 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
-			do_replace = true;
+			*do_replace = 1;
 		}
-		f2fs_put_dnode(&dn);
 	}
+	f2fs_put_dnode(&dn);
+next:
+	len -= done;
+	off += done;
+	if (len)
+		goto next_dnode;
+	return 0;
+}
 
-	if (new_addr == NULL_ADDR)
-		return full ? truncate_hole(inode, dst, dst + 1) : 0;
+static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
+				int *do_replace, pgoff_t off, int len)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct dnode_of_data dn;
+	int ret, i;
 
-	if (do_replace) {
-		struct page *ipage;
-		struct node_info ni;
+	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
+		if (*do_replace == 0)
+			continue;
 
-		if (test_opt(sbi, LFS)) {
-			ret = -ENOTSUPP;
-			goto err_out;
+		set_new_dnode(&dn, inode, NULL, NULL, 0);
+		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
+		if (ret) {
+			dec_valid_block_count(sbi, inode, 1);
+			invalidate_blocks(sbi, *blkaddr);
+		} else {
+			f2fs_update_data_blkaddr(&dn, *blkaddr);
 		}
+		f2fs_put_dnode(&dn);
+	}
+	return 0;
+}
 
-		ipage = get_node_page(sbi, inode->i_ino);
-		if (IS_ERR(ipage)) {
-			ret = PTR_ERR(ipage);
-			goto err_out;
+static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+			block_t *blkaddr, int *do_replace,
+			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
+	pgoff_t i = 0;
+	int ret;
+
+	while (i < len) {
+		if (blkaddr[i] == NULL_ADDR && !full) {
+			i++;
+			continue;
 		}
 
-		set_new_dnode(&dn, inode, ipage, NULL, 0);
-		ret = f2fs_reserve_block(&dn, dst);
-		if (ret)
-			goto err_out;
+		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
+			struct dnode_of_data dn;
+			struct node_info ni;
+			size_t new_size;
+			pgoff_t ilen;
 
-		truncate_data_blocks_range(&dn, 1);
+			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
+			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
+			if (ret)
+				return ret;
 
-		get_node_info(sbi, dn.nid, &ni);
-		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
-				ni.version, true, false);
-		f2fs_put_dnode(&dn);
-	} else {
-		struct page *psrc, *pdst;
+			get_node_info(sbi, dn.nid, &ni);
+			ilen = min((pgoff_t)
+				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+						dn.ofs_in_node, len - i);
+			do {
+				dn.data_blkaddr = datablock_addr(dn.node_page,
+								dn.ofs_in_node);
+				truncate_data_blocks_range(&dn, 1);
+
+				if (do_replace[i]) {
+					f2fs_i_blocks_write(src_inode,
+								1, false);
+					f2fs_i_blocks_write(dst_inode,
+								1, true);
+					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+					blkaddr[i], ni.version, true, false);
+
+					do_replace[i] = 0;
+				}
+				dn.ofs_in_node++;
+				i++;
+				new_size = (dst + i) << PAGE_SHIFT;
+				if (dst_inode->i_size < new_size)
+					f2fs_i_size_write(dst_inode, new_size);
+			} while ((do_replace[i] || blkaddr[i] == NULL_ADDR) && --ilen);
 
-		psrc = get_lock_data_page(inode, src, true);
-		if (IS_ERR(psrc))
-			return PTR_ERR(psrc);
-		pdst = get_new_data_page(inode, NULL, dst, true);
-		if (IS_ERR(pdst)) {
+			f2fs_put_dnode(&dn);
+		} else {
+			struct page *psrc, *pdst;
+
+			psrc = get_lock_data_page(src_inode, src + i, true);
+			if (IS_ERR(psrc))
+				return PTR_ERR(psrc);
+			pdst = get_new_data_page(dst_inode, NULL, dst + i,
+								true);
+			if (IS_ERR(pdst)) {
+				f2fs_put_page(psrc, 1);
+				return PTR_ERR(pdst);
+			}
+			f2fs_copy_page(psrc, pdst);
+			set_page_dirty(pdst);
+			f2fs_put_page(pdst, 1);
 			f2fs_put_page(psrc, 1);
-			return PTR_ERR(pdst);
-		}
-		f2fs_copy_page(psrc, pdst);
-		set_page_dirty(pdst);
-		f2fs_put_page(pdst, 1);
-		f2fs_put_page(psrc, 1);
 
-		return truncate_hole(inode, src, src + 1);
+			ret = truncate_hole(src_inode, src + i, src + i + 1);
+			if (ret)
+				return ret;
+			i++;
+		}
 	}
 	return 0;
+}
 
-err_out:
-	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
-		f2fs_update_data_blkaddr(&dn, new_addr);
-		f2fs_put_dnode(&dn);
+static int __exchange_data_block(struct inode *src_inode,
+			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
+			int len, bool full)
+{
+	block_t *src_blkaddr;
+	int *do_replace;
+	int ret;
+
+	src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * len, GFP_KERNEL);
+	if (!src_blkaddr)
+		return -ENOMEM;
+
+	do_replace = f2fs_kvzalloc(sizeof(int) * len, GFP_KERNEL);
+	if (!do_replace) {
+		kvfree(src_blkaddr);
+		return -ENOMEM;
 	}
+
+	ret = __read_out_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
+	if (ret)
+		goto roll_back;
+
+	ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
+					do_replace, src, dst, len, full);
+	if (ret)
+		goto roll_back;
+
+	kvfree(src_blkaddr);
+	kvfree(do_replace);
+	return 0;
+
+roll_back:
+	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
+	kvfree(src_blkaddr);
+	kvfree(do_replace);
 	return ret;
 }
 
@@ -975,16 +1079,12 @@ static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
-	int ret = 0;
+	int ret;
 
-	for (; end < nrpages; start++, end++) {
-		f2fs_balance_fs(sbi, true);
-		f2fs_lock_op(sbi);
-		ret = __exchange_data_block(inode, end, start, true);
-		f2fs_unlock_op(sbi);
-		if (ret)
-			break;
-	}
+	f2fs_balance_fs(sbi, true);
+	f2fs_lock_op(sbi);
+	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
+	f2fs_unlock_op(sbi);
 	return ret;
 }
 
@@ -1173,7 +1273,7 @@ out:
 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	pgoff_t pg_start, pg_end, delta, nrpages, idx;
+	pgoff_t nr, pg_start, pg_end, delta, idx;
 	loff_t new_size;
 	int ret = 0;
 
@@ -1208,14 +1308,18 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	pg_start = offset >> PAGE_SHIFT;
 	pg_end = (offset + len) >> PAGE_SHIFT;
 	delta = pg_end - pg_start;
-	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+	while (!ret && idx > pg_start) {
+		nr = idx - pg_start;
+		if (nr > delta)
+			nr = delta;
+		idx -= nr;
 
-	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
 		f2fs_lock_op(sbi);
-		ret = __exchange_data_block(inode, idx, idx + delta, false);
+		ret = __exchange_data_block(inode, inode, idx,
+					idx + delta, nr, false);
 		f2fs_unlock_op(sbi);
-		if (ret)
-			break;
 	}
 
 	/* write out all moved pages, if possible */
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b841c43..d78f61d 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -649,6 +649,7 @@ release_out:
 	if (err == -ENOENT) {
 		dn->cur_level = i;
 		dn->max_level = level;
+		dn->ofs_in_node = offset[level];
 	}
 	return err;
 }
-- 
2.8.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 2/5] f2fs: disable extent_cache for fcollapse/finsert inodes
  2016-07-15  0:46 [PATCH 1/5] f2fs: refactor __exchange_data_block for speed up Jaegeuk Kim
@ 2016-07-15  0:46 ` Jaegeuk Kim
  2016-07-15  0:46   ` Jaegeuk Kim
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Jaegeuk Kim @ 2016-07-15  0:46 UTC (permalink / raw)
  To: linux-kernel, linux-fsdevel, linux-f2fs-devel; +Cc: Jaegeuk Kim

Disabling the extent cache for these inodes avoids extent tree updates while
blocks are being moved around, which further reduces the elapsed time of
xfstests/generic/017.

Before: 458 s
After:  390 s

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/extent_cache.c | 13 +++++++++++++
 fs/f2fs/f2fs.h         |  1 +
 fs/f2fs/file.c         |  5 +++++
 3 files changed, 19 insertions(+)

diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 5b4b6d4..2b06d4f 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -631,6 +631,19 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
 	return node_cnt;
 }
 
+void f2fs_drop_extent_tree(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+	set_inode_flag(inode, FI_NO_EXTENT);
+
+	write_lock(&et->lock);
+	__free_extent_tree(sbi, et);
+	__drop_largest_extent(inode, 0, UINT_MAX);
+	write_unlock(&et->lock);
+}
+
 void f2fs_destroy_extent_tree(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 1190c04..2b8bf50 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2317,6 +2317,7 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *);
  */
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
 bool f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+void f2fs_drop_extent_tree(struct inode *);
 unsigned int f2fs_destroy_extent_node(struct inode *);
 void f2fs_destroy_extent_tree(struct inode *);
 bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 0d4f1dd..dd399b3 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1083,6 +1083,9 @@ static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
 
 	f2fs_balance_fs(sbi, true);
 	f2fs_lock_op(sbi);
+
+	f2fs_drop_extent_tree(inode);
+
 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
 	f2fs_unlock_op(sbi);
 	return ret;
@@ -1317,6 +1320,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 		idx -= nr;
 
 		f2fs_lock_op(sbi);
+		f2fs_drop_extent_tree(inode);
+
 		ret = __exchange_data_block(inode, inode, idx,
 					idx + delta, nr, false);
 		f2fs_unlock_op(sbi);
-- 
2.8.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 3/5] f2fs: support copy_file_range
  2016-07-15  0:46 [PATCH 1/5] f2fs: refactor __exchange_data_block for speed up Jaegeuk Kim
@ 2016-07-15  0:46   ` Jaegeuk Kim
  2016-07-15  0:46   ` Jaegeuk Kim
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Jaegeuk Kim @ 2016-07-15  0:46 UTC (permalink / raw)
  To: linux-kernel, linux-fsdevel, linux-f2fs-devel; +Cc: Jaegeuk Kim

This patch implements copy_file_range in f2fs.
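
For testing, a minimal userspace sketch that exercises this path via the raw
syscall (a hypothetical test program, assuming kernel headers that define
__NR_copy_file_range; note the block-alignment checks in the diff apply to
pos_in, pos_out and len):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int in, out;
	ssize_t n;

	if (argc != 3)
		return 1;

	in = open(argv[1], O_RDONLY);
	out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0)
		return 1;

	/*
	 * NULL offsets: use and advance the file offsets of both fds.
	 * f2fs requires pos_in/pos_out (and len, unless it reaches EOF)
	 * to be F2FS_BLKSIZE aligned.
	 */
	n = syscall(__NR_copy_file_range, in, NULL, out, NULL,
		    (size_t)64 * 4096, 0);
	if (n < 0)
		perror("copy_file_range");
	else
		printf("copied %zd bytes\n", n);

	close(in);
	close(out);
	return n < 0;
}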

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/file.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index dd399b3..c529884 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -2201,6 +2201,72 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	return ret;
 }
 
+ssize_t f2fs_copy_file_range(struct file *file_in, loff_t pos_in,
+				struct file *file_out, loff_t pos_out,
+				size_t len, unsigned int flags)
+{
+	struct inode *src = file_inode(file_in);
+	struct inode *dst = file_inode(file_out);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
+	size_t olen = len, dst_max_i_size = 0;
+	size_t ret;
+
+	if (file_in->f_path.mnt != file_out->f_path.mnt ||
+				src->i_sb != dst->i_sb)
+		return -EXDEV;
+
+	if (unlikely(f2fs_readonly(src->i_sb)))
+		return -EROFS;
+
+	if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
+		return -EISDIR;
+
+	inode_lock(src);
+	if (src != dst)
+		inode_lock(dst);
+
+	ret = -EINVAL;
+	if (pos_in + len > src->i_size || pos_in + len < pos_in)
+		goto out_unlock;
+	if (len == 0)
+		olen = len = src->i_size - pos_in;
+	if (pos_in + len == src->i_size)
+		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
+	if (len == 0) {
+		ret = 0;
+		goto out_unlock;
+	}
+	if (pos_out + olen > dst->i_size)
+		dst_max_i_size = pos_out + olen;
+
+	/* verify the end result is block aligned */
+	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
+			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
+			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
+		goto out_unlock;
+
+	ret = f2fs_convert_inline_inode(src);
+	if (ret)
+		goto out_unlock;
+	ret = f2fs_convert_inline_inode(dst);
+	if (ret)
+		goto out_unlock;
+
+	f2fs_balance_fs(sbi, true);
+	f2fs_lock_op(sbi);
+	ret = __exchange_data_block(src, dst, pos_in, pos_out, len, false);
+	if (!ret && dst->i_size > dst_max_i_size)
+		f2fs_i_size_write(dst, dst_max_i_size);
+	f2fs_unlock_op(sbi);
+out_unlock:
+	if (src != dst)
+		inode_unlock(dst);
+	inode_unlock(src);
+	if (!ret)
+		ret = olen;
+	return ret;
+}
+
 #ifdef CONFIG_COMPAT
 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
@@ -2247,6 +2313,7 @@ const struct file_operations f2fs_file_operations = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= f2fs_compat_ioctl,
 #endif
+	.copy_file_range = f2fs_copy_file_range,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= iter_file_splice_write,
 };
-- 
2.8.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 4/5] f2fs: add maximum prefree segments
  2016-07-15  0:46 [PATCH 1/5] f2fs: refactor __exchange_data_block for speed up Jaegeuk Kim
@ 2016-07-15  0:46   ` Jaegeuk Kim
  2016-07-15  0:46   ` Jaegeuk Kim
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Jaegeuk Kim @ 2016-07-15  0:46 UTC (permalink / raw)
  To: linux-kernel, linux-fsdevel, linux-f2fs-devel; +Cc: Jaegeuk Kim

On 1TB storage, the default 5% policy admits 22841 prefree segments, which
consumes too many segments.
This patch caps the maximum number of prefree segments at 8GB worth of
segments in that case.
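
For scale, assuming the default 2MB segment size:

	22841 segments * 2MB ~= 44.6GB that may sit as prefree before reclaim
	 4096 segments * 2MB  =  8GB  (DEF_MAX_RECLAIM_PREFREE_SEGMENTS below)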

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/segment.c | 3 +++
 fs/f2fs/segment.h | 1 +
 2 files changed, 4 insertions(+)

diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 08f6c0b..e87aa05 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -2409,6 +2409,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
 	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
 	sm_info->rec_prefree_segments = sm_info->main_segments *
 					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
+	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
+		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+
 	if (!test_opt(sbi, LFS))
 		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 57d450f..b33f73e 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -16,6 +16,7 @@
 #define NULL_SECNO			((unsigned int)(~0))
 
 #define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
+#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */
 
 /* L: Logical segment # in volume, R: Relative segment # in main area */
 #define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
-- 
2.8.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 5/5] f2fs: use blk_plug in all the possible paths
  2016-07-15  0:46 [PATCH 1/5] f2fs: refactor __exchange_data_block for speed up Jaegeuk Kim
@ 2016-07-15  0:46   ` Jaegeuk Kim
  2016-07-15  0:46   ` Jaegeuk Kim
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Jaegeuk Kim @ 2016-07-15  0:46 UTC (permalink / raw)
  To: linux-kernel, linux-fsdevel, linux-f2fs-devel; +Cc: Jaegeuk Kim

This patch reverts commit 19a5f5e2ef37 ("f2fs: drop any block plugging")
and additionally adds blk_plug in the write paths.

The main reason is that blk_start_plug can be used to wake up from low-power
mode before submitting further bios.
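
The pattern added throughout is the standard plugging idiom: bios submitted
between blk_start_plug() and blk_finish_plug() are held on the per-task plug
list so the block layer can merge them before dispatch. A minimal sketch
mirroring the hunks below (the call in the middle stands for any of the
batched writeback helpers touched by this patch):

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* queue up a batch of bios via the existing writeback helper */
	ret = f2fs_write_cache_pages(mapping, wbc);
	blk_finish_plug(&plug);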

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/checkpoint.c | 7 +++++++
 fs/f2fs/data.c       | 3 +++
 fs/f2fs/file.c       | 6 +++++-
 fs/f2fs/gc.c         | 5 +++++
 fs/f2fs/node.c       | 3 +++
 fs/f2fs/segment.c    | 7 ++++++-
 6 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 8ea8953..be1c54b 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -265,6 +265,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
 				struct writeback_control *wbc)
 {
 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+	struct blk_plug plug;
 	long diff, written;
 
 	/* collect a number of dirty meta pages and write together */
@@ -277,7 +278,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
 	/* if mounting is failed, skip writing node pages */
 	mutex_lock(&sbi->cp_mutex);
 	diff = nr_pages_to_write(sbi, META, wbc);
+	blk_start_plug(&plug);
 	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
+	blk_finish_plug(&plug);
 	mutex_unlock(&sbi->cp_mutex);
 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
 	return 0;
@@ -899,8 +902,11 @@ static int block_operations(struct f2fs_sb_info *sbi)
 		.nr_to_write = LONG_MAX,
 		.for_reclaim = 0,
 	};
+	struct blk_plug plug;
 	int err = 0;
 
+	blk_start_plug(&plug);
+
 retry_flush_dents:
 	f2fs_lock_all(sbi);
 	/* write all the dirty dentry pages */
@@ -937,6 +943,7 @@ retry_flush_nodes:
 		goto retry_flush_nodes;
 	}
 out:
+	blk_finish_plug(&plug);
 	return err;
 }
 
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 6500995..b7a530e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1438,6 +1438,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 {
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct blk_plug plug;
 	int ret;
 
 	/* deal with chardevs and other special file */
@@ -1463,7 +1464,9 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 
 	trace_f2fs_writepages(mapping->host, wbc, DATA);
 
+	blk_start_plug(&plug);
 	ret = f2fs_write_cache_pages(mapping, wbc);
+	blk_finish_plug(&plug);
 	/*
 	 * if some pages were truncated, we cannot guarantee its mapping->host
 	 * to detect pending bios.
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c529884..001f062 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -2180,6 +2180,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
+	struct blk_plug plug;
 	ssize_t ret;
 
 	if (f2fs_encrypted_inode(inode) &&
@@ -2191,8 +2192,11 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	ret = generic_write_checks(iocb, from);
 	if (ret > 0) {
 		ret = f2fs_preallocate_blocks(iocb, from);
-		if (!ret)
+		if (!ret) {
+			blk_start_plug(&plug);
 			ret = __generic_file_write_iter(iocb, from);
+			blk_finish_plug(&plug);
+		}
 	}
 	inode_unlock(inode);
 
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index c612137..90d5fda 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -788,6 +788,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
+	struct blk_plug plug;
 	unsigned int segno = start_segno;
 	unsigned int end_segno = start_segno + sbi->segs_per_sec;
 	int seg_freed = 0;
@@ -805,6 +806,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 		unlock_page(sum_page);
 	}
 
+	blk_start_plug(&plug);
+
 	for (segno = start_segno; segno < end_segno; segno++) {
 
 		if (get_valid_blocks(sbi, segno, 1) == 0)
@@ -842,6 +845,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 		f2fs_submit_merged_bio(sbi,
 				(type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);
 
+	blk_finish_plug(&plug);
+
 	if (gc_type == FG_GC) {
 		while (start_segno < end_segno)
 			if (get_valid_blocks(sbi, start_segno++, 1) == 0)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index d78f61d..79a93c6 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1618,6 +1618,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+	struct blk_plug plug;
 	long diff;
 
 	/* balancing f2fs's metadata in background */
@@ -1631,7 +1632,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 
 	diff = nr_pages_to_write(sbi, NODE, wbc);
 	wbc->sync_mode = WB_SYNC_NONE;
+	blk_start_plug(&plug);
 	sync_node_pages(sbi, wbc);
+	blk_finish_plug(&plug);
 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
 	return 0;
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index e87aa05..d45e6bb 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -381,8 +381,13 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 			excess_prefree_segs(sbi) ||
 			excess_dirty_nats(sbi) ||
 			(is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
-		if (test_opt(sbi, DATA_FLUSH))
+		if (test_opt(sbi, DATA_FLUSH)) {
+			struct blk_plug plug;
+
+			blk_start_plug(&plug);
 			sync_dirty_inodes(sbi, FILE_INODE);
+			blk_finish_plug(&plug);
+		}
 		f2fs_sync_fs(sbi->sb, true);
 		stat_inc_bg_cp_count(sbi->stat_info);
 	}
-- 
2.8.3

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] f2fs: use blk_plug in all the possible paths
  2016-07-15  0:46   ` Jaegeuk Kim
  (?)
@ 2016-07-15  2:21   ` Christoph Hellwig
  2016-07-15  3:05     ` Jaegeuk Kim
  -1 siblings, 1 reply; 12+ messages in thread
From: Christoph Hellwig @ 2016-07-15  2:21 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-kernel, linux-fsdevel, linux-f2fs-devel

On Thu, Jul 14, 2016 at 05:46:29PM -0700, Jaegeuk Kim wrote:
> The main reason is that blk_start_plug can be used to wake up from low-power
> mode before submitting further bios.

Where does that myth come from?

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] f2fs: use blk_plug in all the possible paths
  2016-07-15  2:21   ` Christoph Hellwig
@ 2016-07-15  3:05     ` Jaegeuk Kim
  2016-07-19  3:59       ` Christoph Hellwig
  0 siblings, 1 reply; 12+ messages in thread
From: Jaegeuk Kim @ 2016-07-15  3:05 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-kernel, linux-fsdevel, linux-f2fs-devel

On Thu, Jul 14, 2016 at 07:21:32PM -0700, Christoph Hellwig wrote:
> On Thu, Jul 14, 2016 at 05:46:29PM -0700, Jaegeuk Kim wrote:
> > The main reason is that blk_start_plug can be used to wake up from low-power
> > mode before submitting further bios.
> 
> Where does that myth come from?

From kernel guys working on android.
And I also suspect they may have done something related to power mode
changes, given that I've seen annoying performance jitter compared to
ext4 all the time in some android products.

Frankly speaking, I cannot find a concrete reason to remove plugging entirely,
since I don't see big performance differences in any of my test results on
servers either.

Thanks,

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] f2fs: use blk_plug in all the possible paths
  2016-07-15  3:05     ` Jaegeuk Kim
@ 2016-07-19  3:59       ` Christoph Hellwig
  2016-07-19  5:49         ` Jaegeuk Kim
  0 siblings, 1 reply; 12+ messages in thread
From: Christoph Hellwig @ 2016-07-19  3:59 UTC (permalink / raw)
  To: Jaegeuk Kim
  Cc: Christoph Hellwig, linux-kernel, linux-fsdevel, linux-f2fs-devel

On Thu, Jul 14, 2016 at 08:05:02PM -0700, Jaegeuk Kim wrote:
> From kernel guys working on android.

Well, until it's mainline it simply doesn't matter, so NAK to this
patch.  Tying power behavior to plugs also sounds pretty broken, so we'd
probably come up with something better if they bothered to actually
submit their patches upstream or at least explain what they want
to achieve.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 5/5] f2fs: use blk_plug in all the possible paths
  2016-07-19  3:59       ` Christoph Hellwig
@ 2016-07-19  5:49         ` Jaegeuk Kim
  0 siblings, 0 replies; 12+ messages in thread
From: Jaegeuk Kim @ 2016-07-19  5:49 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: linux-kernel, linux-fsdevel, linux-f2fs-devel

On Mon, Jul 18, 2016 at 08:59:52PM -0700, Christoph Hellwig wrote:
> On Thu, Jul 14, 2016 at 08:05:02PM -0700, Jaegeuk Kim wrote:
> > From kernel guys working on android.
> 
> Well, until it's mainline it simply doesn't matter, so NAK to this
> patch.  Tying power behavior to plugs also sounds pretty broken, so we'd
> probably come up with something better if they bothered to actually
> submit their patches upsteam or at least explaining what they want
> to archive.

Actually, the initial patch that dropped the plugs is not mainlined either.
So, at this time, I just want to revert it with this patch, since I recognized
that there might still be possible use cases for plugs someday.
For now, I have lost the reason to eliminate the plugs.

Moreover, since every other filesystem uses plugs, f2fs doesn't need to be an
exception in that regard (except hm-smr). So, let me sync f2fs
with the other filesystems at this point.

I totally agree that it'd be best to see their patches upstream, but I've
seen that it is quite a tough challenge for them.

Thanks,

^ permalink raw reply	[flat|nested] 12+ messages in thread
