* [PATCH 1/6] f2fs: support in batch multi blocks preallocation
@ 2016-05-09 11:56 Chao Yu
  2016-05-09 11:56 ` [PATCH 2/6] f2fs: support in batch fzero in dnode page Chao Yu
                   ` (5 more replies)
  0 siblings, 6 replies; 17+ messages in thread
From: Chao Yu @ 2016-05-09 11:56 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

This patch introduces reserve_new_blocks to preallocate multiple blocks in
a single batch operation, which avoids a lot of redundant per-block work
and results in better performance.
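
The gist, as a minimal sketch condensed from the diff below (locking,
tracing and error paths omitted): instead of calling reserve_new_block()
once per block, which pays a quota update and a node page dirtying each
time, the batched helper charges the block quota once and then fills every
NULL_ADDR slot of the dnode page in a single pass:

        if (!inc_valid_block_count(sbi, dn->inode, count))
                return -ENOSPC;

        f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

        for (; count > 0; dn->ofs_in_node++) {
                if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR) {
                        dn->data_blkaddr = NEW_ADDR;
                        __set_data_blkaddr(dn);
                        count--;
                }
        }

        if (set_page_dirty(dn->node_page))
                dn->node_changed = true;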

In virtual machine, with rotational device:

time fallocate -l 32G /mnt/f2fs/file

Before:
real	0m4.584s
user	0m0.000s
sys	0m4.580s

After:
real	0m0.292s
user	0m0.000s
sys	0m0.272s

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/data.c              | 93 +++++++++++++++++++++++++++++++++------------
 include/trace/events/f2fs.h | 14 ++++---
 2 files changed, 78 insertions(+), 29 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 369d953..ea0abdc 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -278,6 +278,16 @@ alloc_new:
 	trace_f2fs_submit_page_mbio(fio->page, fio);
 }
 
+void __set_data_blkaddr(struct dnode_of_data *dn)
+{
+	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+	__le32 *addr_array;
+
+	/* Get physical address of data block */
+	addr_array = blkaddr_in_node(rn);
+	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+}
+
 /*
  * Lock ordering for the change of data block address:
  * ->data_page
@@ -286,19 +296,9 @@ alloc_new:
  */
 void set_data_blkaddr(struct dnode_of_data *dn)
 {
-	struct f2fs_node *rn;
-	__le32 *addr_array;
-	struct page *node_page = dn->node_page;
-	unsigned int ofs_in_node = dn->ofs_in_node;
-
-	f2fs_wait_on_page_writeback(node_page, NODE, true);
-
-	rn = F2FS_NODE(node_page);
-
-	/* Get physical address of data block */
-	addr_array = blkaddr_in_node(rn);
-	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
-	if (set_page_dirty(node_page))
+	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+	__set_data_blkaddr(dn);
+	if (set_page_dirty(dn->node_page))
 		dn->node_changed = true;
 }
 
@@ -309,24 +309,53 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
 	f2fs_update_extent_cache(dn);
 }
 
-int reserve_new_block(struct dnode_of_data *dn)
+int reserve_new_blocks(struct dnode_of_data *dn, unsigned int start,
+							unsigned int count)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+	unsigned int ofs_in_node;
+
+	if (!count)
+		return 0;
 
 	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 		return -EPERM;
-	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+	if (unlikely(!inc_valid_block_count(sbi, dn->inode, count)))
 		return -ENOSPC;
 
-	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
+	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+						dn->ofs_in_node, count);
+
+	ofs_in_node = dn->ofs_in_node;
+	dn->ofs_in_node = start;
+
+	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+
+	for (; count > 0; dn->ofs_in_node++) {
+		block_t blkaddr =
+			datablock_addr(dn->node_page, dn->ofs_in_node);
+		if (blkaddr == NULL_ADDR) {
+			dn->data_blkaddr = NEW_ADDR;
+			__set_data_blkaddr(dn);
+			count--;
+		}
+	}
+
+	dn->ofs_in_node = ofs_in_node;
+
+	if (set_page_dirty(dn->node_page))
+		dn->node_changed = true;
 
-	dn->data_blkaddr = NEW_ADDR;
-	set_data_blkaddr(dn);
 	mark_inode_dirty(dn->inode);
 	sync_inode_page(dn);
 	return 0;
 }
 
+int reserve_new_block(struct dnode_of_data *dn)
+{
+	return reserve_new_blocks(dn, dn->ofs_in_node, 1);
+}
+
 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
 {
 	bool need_put = dn->inode_page ? false : true;
@@ -621,8 +650,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	struct dnode_of_data dn;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
-	pgoff_t pgofs, end_offset;
-	int err = 0, ofs = 1;
+	pgoff_t pgofs, end_offset, end;
+	int err = 0, ofs = 1, prealloc, start;
 	struct extent_info ei;
 	bool allocated = false;
 	block_t blkaddr;
@@ -632,6 +661,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 
 	/* it only supports block size == page size */
 	pgofs =	(pgoff_t)map->m_lblk;
+	end = pgofs + maxblocks;
 
 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
 		map->m_pblk = ei.blk + pgofs - ei.fofs;
@@ -659,6 +689,8 @@ next_dnode:
 		goto unlock_out;
 	}
 
+	prealloc = 0;
+	start = dn.ofs_in_node;
 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 
 next_block:
@@ -672,7 +704,7 @@ next_block:
 			}
 			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
 				if (blkaddr == NULL_ADDR)
-					err = reserve_new_block(&dn);
+					prealloc++;
 			} else {
 				err = __allocate_data_block(&dn);
 				if (!err)
@@ -700,6 +732,9 @@ next_block:
 		}
 	}
 
+	if (flag == F2FS_GET_BLOCK_PRE_AIO)
+		goto skip;
+
 	if (map->m_len == 0) {
 		/* preallocated unwritten block should be mapped for fiemap. */
 		if (blkaddr == NEW_ADDR)
@@ -711,18 +746,28 @@ next_block:
 	} else if ((map->m_pblk != NEW_ADDR &&
 			blkaddr == (map->m_pblk + ofs)) ||
 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
-			flag == F2FS_GET_BLOCK_PRE_DIO ||
-			flag == F2FS_GET_BLOCK_PRE_AIO) {
+			flag == F2FS_GET_BLOCK_PRE_DIO) {
 		ofs++;
 		map->m_len++;
 	} else {
 		goto sync_out;
 	}
 
+skip:
 	dn.ofs_in_node++;
 	pgofs++;
 
-	if (map->m_len < maxblocks) {
+	/* preallocate blocks in batch for one dnode page */
+	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
+			(pgofs == end || dn.ofs_in_node == end_offset)) {
+		allocated = false;
+		err = reserve_new_blocks(&dn, start, prealloc);
+		if (err)
+			goto sync_out;
+		map->m_len = pgofs - start;
+	}
+
+	if (pgofs < end) {
 		if (dn.ofs_in_node < end_offset)
 			goto next_block;
 
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 0f56584..5f927ff 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -694,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit,
 		__entry->ret)
 );
 
-TRACE_EVENT(f2fs_reserve_new_block,
+TRACE_EVENT(f2fs_reserve_new_blocks,
 
-	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
+						unsigned int count),
 
-	TP_ARGS(inode, nid, ofs_in_node),
+	TP_ARGS(inode, nid, ofs_in_node, count),
 
 	TP_STRUCT__entry(
 		__field(dev_t,	dev)
 		__field(nid_t, nid)
 		__field(unsigned int, ofs_in_node)
+		__field(unsigned int, count)
 	),
 
 	TP_fast_assign(
 		__entry->dev	= inode->i_sb->s_dev;
 		__entry->nid	= nid;
 		__entry->ofs_in_node = ofs_in_node;
+		__entry->count = count;
 	),
 
-	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
+	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %u",
 		show_dev(__entry),
 		(unsigned int)__entry->nid,
-		__entry->ofs_in_node)
+		__entry->ofs_in_node,
+		__entry->count)
 );
 
 DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
-- 
2.8.2.311.gee88674


* [PATCH 2/6] f2fs: support in batch fzero in dnode page
  2016-05-09 11:56 [PATCH 1/6] f2fs: support in batch multi blocks preallocation Chao Yu
@ 2016-05-09 11:56 ` Chao Yu
  2016-05-09 23:03   ` Jaegeuk Kim
  2016-05-09 11:56 ` [PATCH 3/6] f2fs: use mnt_{want,drop}_write_file in ioctl Chao Yu
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 17+ messages in thread
From: Chao Yu @ 2016-05-09 11:56 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

This patch speeds up fzero_range by performing space preallocation and
block address removal for one dnode page as a batch operation.
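
In essence (condensed from f2fs_do_zero_range() in the diff below; error
handling omitted): holes in the dnode page are counted and reserved with a
single reserve_new_blocks() call, then already-written blocks are
invalidated and every slot is marked as preallocated:

        /* pass 1: reserve all NULL_ADDR slots of this dnode page at once */
        for (ofs = dn->ofs_in_node, index = start; index < end; index++, ofs++)
                if (datablock_addr(dn->node_page, ofs) == NULL_ADDR)
                        count++;
        ret = reserve_new_blocks(dn, dn->ofs_in_node, count);

        /* pass 2: release old blocks and turn every slot into NEW_ADDR */
        for (index = start; index < end; index++, dn->ofs_in_node++) {
                dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
                if (dn->data_blkaddr != NEW_ADDR) {
                        invalidate_blocks(sbi, dn->data_blkaddr);
                        dn->data_blkaddr = NEW_ADDR;
                        __set_data_blkaddr(dn);
                }
        }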

In virtual machine, with zram driver:

dd if=/dev/zero of=/mnt/f2fs/file bs=1M count=4096
time xfs_io -f /mnt/f2fs/file -c "fzero 0 4096M"

Before:
real	0m3.276s
user	0m0.008s
sys	0m3.260s

After:
real	0m1.568s
user	0m0.000s
sys	0m1.564s

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/f2fs.h |  2 ++
 fs/f2fs/file.c | 61 +++++++++++++++++++++++++++++++++++++++++++---------------
 2 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 75b0084..f75cd65 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1963,8 +1963,10 @@ void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
 void f2fs_flush_merged_bios(struct f2fs_sb_info *);
 int f2fs_submit_page_bio(struct f2fs_io_info *);
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
+void __set_data_blkaddr(struct dnode_of_data *);
 void set_data_blkaddr(struct dnode_of_data *);
 void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
+int reserve_new_blocks(struct dnode_of_data *, unsigned int, unsigned int);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_get_block(struct dnode_of_data *, pgoff_t);
 ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 5ead254..d5910bc 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1035,6 +1035,38 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	return ret;
 }
 
+static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+								pgoff_t end)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+	pgoff_t index = start;
+	unsigned int ofs_in_node = dn->ofs_in_node, count = 0;
+	int ret;
+
+	for (; index < end; index++, ofs_in_node++) {
+		if (datablock_addr(dn->node_page, ofs_in_node) == NULL_ADDR)
+			count++;
+	}
+
+	ret = reserve_new_blocks(dn, dn->ofs_in_node, count);
+	if (ret)
+		return ret;
+
+	for (index = start; index < end; index++, dn->ofs_in_node++) {
+		dn->data_blkaddr =
+				datablock_addr(dn->node_page, dn->ofs_in_node);
+		if (dn->data_blkaddr != NEW_ADDR) {
+			invalidate_blocks(sbi, dn->data_blkaddr);
+			dn->data_blkaddr = NEW_ADDR;
+			__set_data_blkaddr(dn);
+		}
+	}
+
+	f2fs_update_extent_cache_range(dn, start, 0, end - start);
+
+	return 0;
+}
+
 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 								int mode)
 {
@@ -1085,35 +1117,32 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 					(loff_t)pg_start << PAGE_SHIFT);
 		}
 
-		for (index = pg_start; index < pg_end; index++) {
+		for (index = pg_start; index < pg_end;) {
 			struct dnode_of_data dn;
-			struct page *ipage;
+			unsigned int end_offset;
+			pgoff_t end;
 
 			f2fs_lock_op(sbi);
 
-			ipage = get_node_page(sbi, inode->i_ino);
-			if (IS_ERR(ipage)) {
-				ret = PTR_ERR(ipage);
-				f2fs_unlock_op(sbi);
-				goto out;
-			}
-
-			set_new_dnode(&dn, inode, ipage, NULL, 0);
-			ret = f2fs_reserve_block(&dn, index);
+			set_new_dnode(&dn, inode, NULL, NULL, 0);
+			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
 			if (ret) {
 				f2fs_unlock_op(sbi);
 				goto out;
 			}
 
-			if (dn.data_blkaddr != NEW_ADDR) {
-				invalidate_blocks(sbi, dn.data_blkaddr);
-				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
-			}
+			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+			end = min(pg_end, end_offset - dn.ofs_in_node + index);
+
+			ret = f2fs_do_zero_range(&dn, index, end);
 			f2fs_put_dnode(&dn);
 			f2fs_unlock_op(sbi);
+			if (ret)
+				goto out;
 
+			index = end;
 			new_size = max_t(loff_t, new_size,
-				(loff_t)(index + 1) << PAGE_SHIFT);
+					(loff_t)index << PAGE_SHIFT);
 		}
 
 		if (off_end) {
-- 
2.8.2.311.gee88674


* [PATCH 3/6] f2fs: use mnt_{want,drop}_write_file in ioctl
  2016-05-09 11:56 [PATCH 1/6] f2fs: support in batch multi blocks preallocation Chao Yu
  2016-05-09 11:56 ` [PATCH 2/6] f2fs: support in batch fzero in dnode page Chao Yu
@ 2016-05-09 11:56 ` Chao Yu
  2016-05-09 11:56 ` [PATCH 4/6] f2fs: make atomic/volatile operation exclusive Chao Yu
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Chao Yu @ 2016-05-09 11:56 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

In the ioctl interfaces, mnt_{want,drop}_write_file should be used to:
- get exclusion against filesystem freezing, which may be used by lvm
  snapshot.
- tell the filesystem that a write is about to be performed on it, and
  make sure that the write is permitted.
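
All of the converted handlers end up with roughly the same shape (a minimal
sketch; the handler name and the work in the middle are illustrative only):

        static int f2fs_ioc_example(struct file *filp, unsigned long arg)
        {
                int ret;

                /* argument copying and ownership/capability checks that do
                 * not need write access come first
                 */

                ret = mnt_want_write_file(filp);
                if (ret)
                        return ret;

                /* ... the actual work that may dirty the filesystem ... */
                ret = 0;

                mnt_drop_write_file(filp);
                return ret;
        }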

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/file.c | 130 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 97 insertions(+), 33 deletions(-)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index d5910bc..828b53e 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1374,20 +1374,16 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
 	unsigned int oldflags;
 	int ret;
 
+	if (!inode_owner_or_capable(inode))
+		return -EACCES;
+
+	if (get_user(flags, (int __user *)arg))
+		return -EFAULT;
+
 	ret = mnt_want_write_file(filp);
 	if (ret)
 		return ret;
 
-	if (!inode_owner_or_capable(inode)) {
-		ret = -EACCES;
-		goto out;
-	}
-
-	if (get_user(flags, (int __user *)arg)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
 	flags = f2fs_mask_flags(inode->i_mode, flags);
 
 	inode_lock(inode);
@@ -1430,18 +1426,22 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	if (f2fs_is_atomic_file(inode))
-		return 0;
+		goto out;
 
 	ret = f2fs_convert_inline_inode(inode);
 	if (ret)
-		return ret;
+		goto out;
 
 	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 
 	if (!get_dirty_pages(inode))
-		return 0;
+		goto out;
 
 	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
 		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
@@ -1449,6 +1449,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
 	if (ret)
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+out:
+	mnt_drop_write_file(filp);
 	return ret;
 }
 
@@ -1460,13 +1462,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
 
-	if (f2fs_is_volatile_file(inode))
-		return 0;
-
 	ret = mnt_want_write_file(filp);
 	if (ret)
 		return ret;
 
+	if (f2fs_is_volatile_file(inode))
+		goto err_out;
+
 	if (f2fs_is_atomic_file(inode)) {
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
 		ret = commit_inmem_pages(inode);
@@ -1490,32 +1492,48 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	if (f2fs_is_volatile_file(inode))
-		return 0;
+		goto out;
 
 	ret = f2fs_convert_inline_inode(inode);
 	if (ret)
-		return ret;
+		goto out;
 
 	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-	return 0;
+out:
+	mnt_drop_write_file(filp);
+	return ret;
 }
 
 static int f2fs_ioc_release_volatile_write(struct file *filp)
 {
 	struct inode *inode = file_inode(filp);
+	int ret;
 
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	if (!f2fs_is_volatile_file(inode))
-		return 0;
+		goto out;
 
-	if (!f2fs_is_first_block_written(inode))
-		return truncate_partial_data_page(inode, 0, true);
+	if (!f2fs_is_first_block_written(inode)) {
+		ret = truncate_partial_data_page(inode, 0, true);
+		goto out;
+	}
 
-	return punch_hole(inode, 0, F2FS_BLKSIZE);
+	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
+out:
+	mnt_drop_write_file(filp);
+	return ret;
 }
 
 static int f2fs_ioc_abort_volatile_write(struct file *filp)
@@ -1548,6 +1566,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct super_block *sb = sbi->sb;
 	__u32 in;
+	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1555,6 +1574,10 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 	if (get_user(in, (__u32 __user *)arg))
 		return -EFAULT;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	switch (in) {
 	case F2FS_GOING_DOWN_FULLSYNC:
 		sb = freeze_bdev(sb->s_bdev);
@@ -1576,10 +1599,13 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 		f2fs_stop_checkpoint(sbi);
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	f2fs_update_time(sbi, REQ_TIME);
-	return 0;
+out:
+	mnt_drop_write_file(filp);
+	return ret;
 }
 
 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
@@ -1600,9 +1626,14 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
 				sizeof(range)))
 		return -EFAULT;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	range.minlen = max((unsigned int)range.minlen,
 				q->limits.discard_granularity);
 	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
+	mnt_drop_write_file(filp);
 	if (ret < 0)
 		return ret;
 
@@ -1629,6 +1660,7 @@ static int f2fs_ioc_keyctl(struct file *filp, unsigned long arg)
 	struct f2fs_key key;
 	void *value = key.key;
 	int type = XATTR_CREATE;
+	int ret;
 
 	if (copy_from_user(&key, (u8 __user *)arg, sizeof(key)))
 		return -EFAULT;
@@ -1636,33 +1668,47 @@ static int f2fs_ioc_keyctl(struct file *filp, unsigned long arg)
 	if (!S_ISREG(inode->i_mode))
 		return -EINVAL;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	if (key.mode == F2FS_DROP_KEY) {
 		int ret = validate_access_key(inode);
-
 		if (ret)
-			return ret;
+			goto out;
 
 		value = NULL;
 		type = XATTR_REPLACE;
 	}
 
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_KEY,
+	ret = f2fs_setxattr(inode, F2FS_XATTR_INDEX_KEY,
 				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
 				value, F2FS_KEY_SIZE, NULL, type);
+out:
+	mnt_drop_write_file(filp);
+	return ret;
 }
 
 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
 {
 	struct fscrypt_policy policy;
 	struct inode *inode = file_inode(filp);
+	int ret;
 
 	if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
 							sizeof(policy)))
 		return -EFAULT;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
-	return fscrypt_process_policy(inode, &policy);
+	ret = fscrypt_process_policy(inode, &policy);
+
+	mnt_drop_write_file(filp);
+	return ret;
 }
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
@@ -1719,6 +1765,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 	struct inode *inode = file_inode(filp);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	__u32 sync;
+	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1729,20 +1776,30 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 	if (f2fs_readonly(sbi->sb))
 		return -EROFS;
 
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
 	if (!sync) {
-		if (!mutex_trylock(&sbi->gc_mutex))
-			return -EBUSY;
+		if (!mutex_trylock(&sbi->gc_mutex)) {
+			ret = -EBUSY;
+			goto out;
+		}
 	} else {
 		mutex_lock(&sbi->gc_mutex);
 	}
 
-	return f2fs_gc(sbi, sync);
+	ret = f2fs_gc(sbi, sync);
+out:
+	mnt_drop_write_file(filp);
+	return ret;
 }
 
 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -1750,7 +1807,14 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
 	if (f2fs_readonly(sbi->sb))
 		return -EROFS;
 
-	return f2fs_sync_fs(sbi->sb, 1);
+	ret = mnt_want_write_file(filp);
+	if (ret)
+		return ret;
+
+	ret = f2fs_sync_fs(sbi->sb, 1);
+
+	mnt_drop_write_file(filp);
+	return ret;
 }
 
 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
-- 
2.8.2.311.gee88674


* [PATCH 4/6] f2fs: make atomic/volatile operation exclusive
  2016-05-09 11:56 [PATCH 1/6] f2fs: support in batch multi blocks preallocation Chao Yu
  2016-05-09 11:56 ` [PATCH 2/6] f2fs: support in batch fzero in dnode page Chao Yu
  2016-05-09 11:56 ` [PATCH 3/6] f2fs: use mnt_{want,drop}_write_file in ioctl Chao Yu
@ 2016-05-09 11:56 ` Chao Yu
  2016-05-09 11:56 ` [PATCH 5/6] f2fs: enable inline_dentry by default Chao Yu
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 17+ messages in thread
From: Chao Yu @ 2016-05-09 11:56 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

The atomic/volatile ioctl interfaces are exposed to user space like other
file operation interfaces, so they need to be made exclusive against each
other to avoid potential conflicts among these operations in concurrent
scenarios.
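
Roughly, each handler now takes the inode lock inside its
mnt_want_write_file() section (a sketch; the per-handler details differ):

        ret = mnt_want_write_file(filp);
        if (ret)
                return ret;

        inode_lock(inode);

        /* check or toggle FI_ATOMIC_FILE / FI_VOLATILE_FILE, commit or
         * drop in-memory pages, etc.
         */

        inode_unlock(inode);
        mnt_drop_write_file(filp);
        return ret;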

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/file.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 828b53e..24d7189 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1430,6 +1430,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 	if (ret)
 		return ret;
 
+	inode_lock(inode);
+
 	if (f2fs_is_atomic_file(inode))
 		goto out;
 
@@ -1450,6 +1452,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 	if (ret)
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
 out:
+	inode_unlock(inode);
 	mnt_drop_write_file(filp);
 	return ret;
 }
@@ -1466,6 +1469,8 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
 	if (ret)
 		return ret;
 
+	inode_lock(inode);
+
 	if (f2fs_is_volatile_file(inode))
 		goto err_out;
 
@@ -1480,6 +1485,7 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
 
 	ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
 err_out:
+	inode_unlock(inode);
 	mnt_drop_write_file(filp);
 	return ret;
 }
@@ -1496,6 +1502,8 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
 	if (ret)
 		return ret;
 
+	inode_lock(inode);
+
 	if (f2fs_is_volatile_file(inode))
 		goto out;
 
@@ -1506,6 +1514,7 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
 	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 out:
+	inode_unlock(inode);
 	mnt_drop_write_file(filp);
 	return ret;
 }
@@ -1522,6 +1531,8 @@ static int f2fs_ioc_release_volatile_write(struct file *filp)
 	if (ret)
 		return ret;
 
+	inode_lock(inode);
+
 	if (!f2fs_is_volatile_file(inode))
 		goto out;
 
@@ -1532,6 +1543,7 @@ static int f2fs_ioc_release_volatile_write(struct file *filp)
 
 	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
 out:
+	inode_unlock(inode);
 	mnt_drop_write_file(filp);
 	return ret;
 }
@@ -1548,6 +1560,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
 	if (ret)
 		return ret;
 
+	inode_lock(inode);
+
 	if (f2fs_is_atomic_file(inode))
 		drop_inmem_pages(inode);
 	if (f2fs_is_volatile_file(inode)) {
@@ -1555,6 +1569,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
 	}
 
+	inode_unlock(inode);
+
 	mnt_drop_write_file(filp);
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	return ret;
-- 
2.8.2.311.gee88674


* [PATCH 5/6] f2fs: enable inline_dentry by default
  2016-05-09 11:56 [PATCH 1/6] f2fs: support in batch multi blocks preallocation Chao Yu
                   ` (2 preceding siblings ...)
  2016-05-09 11:56 ` [PATCH 4/6] f2fs: make atomic/volatile operation exclusive Chao Yu
@ 2016-05-09 11:56 ` Chao Yu
  2016-05-09 23:04   ` Jaegeuk Kim
  2016-05-09 11:56 ` [PATCH 6/6] f2fs: add noinline_dentry mount option Chao Yu
  2016-05-09 23:00 ` [PATCH 1/6] f2fs: support in batch multi blocks preallocation Jaegeuk Kim
  5 siblings, 1 reply; 17+ messages in thread
From: Chao Yu @ 2016-05-09 11:56 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

Make inline_dentry a default mount option to improve space usage and
IO performance in scenarios with numerous small directories.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 fs/f2fs/super.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 28c8992..4a4f4bd 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -824,6 +824,7 @@ static void default_options(struct f2fs_sb_info *sbi)
 
 	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_DATA);
+	set_opt(sbi, INLINE_DENTRY);
 	set_opt(sbi, EXTENT_CACHE);
 
 #ifdef CONFIG_F2FS_FS_XATTR
-- 
2.8.2.311.gee88674


* [PATCH 6/6] f2fs: add noinline_dentry mount option
  2016-05-09 11:56 [PATCH 1/6] f2fs: support in batch multi blocks preallocation Chao Yu
                   ` (3 preceding siblings ...)
  2016-05-09 11:56 ` [PATCH 5/6] f2fs: enable inline_dentry by default Chao Yu
@ 2016-05-09 11:56 ` Chao Yu
  2016-05-09 23:00 ` [PATCH 1/6] f2fs: support in batch multi blocks preallocation Jaegeuk Kim
  5 siblings, 0 replies; 17+ messages in thread
From: Chao Yu @ 2016-05-09 11:56 UTC (permalink / raw)
  To: jaegeuk; +Cc: linux-f2fs-devel, linux-kernel, Chao Yu

This patch adds the noinline_dentry mount option.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
---
 Documentation/filesystems/f2fs.txt | 1 +
 fs/f2fs/super.c                    | 7 +++++++
 2 files changed, 8 insertions(+)

diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e1c9f08..ee3a6c9 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -151,6 +151,7 @@ noinline_data          Disable the inline data feature, inline data feature is
                        enabled by default.
 data_flush             Enable data flushing before checkpoint in order to
                        persist data of regular and symlink.
+noinline_dentry        Disable the inline dentry feature.
 
 ================================================================================
 DEBUGFS ENTRIES
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 4a4f4bd..de2cb78 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -83,6 +83,7 @@ enum {
 	Opt_noinline_data,
 	Opt_data_flush,
 	Opt_fault_injection,
+	Opt_noinline_dentry,
 	Opt_err,
 };
 
@@ -109,6 +110,7 @@ static match_table_t f2fs_tokens = {
 	{Opt_noinline_data, "noinline_data"},
 	{Opt_data_flush, "data_flush"},
 	{Opt_fault_injection, "fault_injection=%u"},
+	{Opt_noinline_dentry, "noinline_dentry"},
 	{Opt_err, NULL},
 };
 
@@ -463,6 +465,9 @@ static int parse_options(struct super_block *sb, char *options)
 				"FAULT_INJECTION was not selected");
 #endif
 			break;
+		case Opt_noinline_dentry:
+			clear_opt(sbi, INLINE_DENTRY);
+			break;
 		default:
 			f2fs_msg(sb, KERN_ERR,
 				"Unrecognized mount option \"%s\" or missing value",
@@ -732,6 +737,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 		seq_puts(seq, ",noinline_data");
 	if (test_opt(sbi, INLINE_DENTRY))
 		seq_puts(seq, ",inline_dentry");
+	else
+		seq_puts(seq, ",noinline_dentry");
 	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
 		seq_puts(seq, ",flush_merge");
 	if (test_opt(sbi, NOBARRIER))
-- 
2.8.2.311.gee88674


* Re: [PATCH 1/6] f2fs: support in batch multi blocks preallocation
  2016-05-09 11:56 [PATCH 1/6] f2fs: support in batch multi blocks preallocation Chao Yu
                   ` (4 preceding siblings ...)
  2016-05-09 11:56 ` [PATCH 6/6] f2fs: add noinline_dentry mount option Chao Yu
@ 2016-05-09 23:00 ` Jaegeuk Kim
  2016-05-10 12:55   ` Chao Yu
  5 siblings, 1 reply; 17+ messages in thread
From: Jaegeuk Kim @ 2016-05-09 23:00 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel

Hi Chao,

On Mon, May 09, 2016 at 07:56:30PM +0800, Chao Yu wrote:
> This patch introduces reserve_new_blocks to make preallocation of multi
> blocks as in batch operation, so it can avoid lots of redundant
> operation, result in better performance.
> 
> In virtual machine, with rotational device:
> 
> time fallocate -l 32G /mnt/f2fs/file
> 
> Before:
> real	0m4.584s
> user	0m0.000s
> sys	0m4.580s
> 
> After:
> real	0m0.292s
> user	0m0.000s
> sys	0m0.272s

It's cool.
Let me add my test results as well.

In x86, with SSD:

time fallocate -l 500G $MNT/testfile

Before : 24.758 s
After  :  1.604 s

By the way, there is one thing we should consider, which is the ENOSPC case.
Could you check this out on top of your patch?
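
In short (condensed from the diff below; the stat_lock handling is
omitted): instead of failing outright, inc_valid_block_count() now clamps
the request to whatever free space is left and reports the clamped count
back to the caller through the pointer:

        valid_block_count = sbi->total_valid_block_count + (block_t)(*count);
        if (unlikely(valid_block_count > sbi->user_block_count)) {
                *count = sbi->user_block_count - sbi->total_valid_block_count;
                if (!*count)
                        return false;   /* really out of space */
        }
        inode->i_blocks += *count;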

If you don't mind, let me integrate this into your patch.
Let me know.

Thanks,

---
 fs/f2fs/data.c              |  9 +++++----
 fs/f2fs/f2fs.h              | 20 +++++++++++++-------
 include/trace/events/f2fs.h |  8 ++++----
 3 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ea0abdc..da640e1 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -278,7 +278,7 @@ alloc_new:
 	trace_f2fs_submit_page_mbio(fio->page, fio);
 }
 
-void __set_data_blkaddr(struct dnode_of_data *dn)
+static void __set_data_blkaddr(struct dnode_of_data *dn)
 {
 	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
 	__le32 *addr_array;
@@ -310,7 +310,7 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
 }
 
 int reserve_new_blocks(struct dnode_of_data *dn, unsigned int start,
-							unsigned int count)
+							blkcnt_t count)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	unsigned int ofs_in_node;
@@ -320,7 +320,7 @@ int reserve_new_blocks(struct dnode_of_data *dn, unsigned int start,
 
 	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 		return -EPERM;
-	if (unlikely(!inc_valid_block_count(sbi, dn->inode, count)))
+	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
 		return -ENOSPC;
 
 	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
@@ -574,6 +574,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	struct node_info ni;
 	int seg = CURSEG_WARM_DATA;
 	pgoff_t fofs;
+	blkcnt_t count = 1;
 
 	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 		return -EPERM;
@@ -582,7 +583,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	if (dn->data_blkaddr == NEW_ADDR)
 		goto alloc;
 
-	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
 		return -ENOSPC;
 
 alloc:
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 75b0084..00fe63c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1114,7 +1114,7 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
 }
 
 static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
-				 struct inode *inode, blkcnt_t count)
+				 struct inode *inode, blkcnt_t *count)
 {
 	block_t	valid_block_count;
 
@@ -1126,14 +1126,19 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
 	}
 #endif
 	valid_block_count =
-		sbi->total_valid_block_count + (block_t)count;
+		sbi->total_valid_block_count + (block_t)(*count);
 	if (unlikely(valid_block_count > sbi->user_block_count)) {
-		spin_unlock(&sbi->stat_lock);
-		return false;
+		*count = sbi->user_block_count - sbi->total_valid_block_count;
+		if (!*count) {
+			spin_unlock(&sbi->stat_lock);
+			return false;
+		}
 	}
-	inode->i_blocks += count;
-	sbi->total_valid_block_count = valid_block_count;
-	sbi->alloc_valid_block_count += (block_t)count;
+	/* *count can be recalculated */
+	inode->i_blocks += *count;
+	sbi->total_valid_block_count =
+		sbi->total_valid_block_count + (block_t)(*count);
+	sbi->alloc_valid_block_count += (block_t)(*count);
 	spin_unlock(&sbi->stat_lock);
 	return true;
 }
@@ -1965,6 +1970,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *);
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
+int reserve_new_blocks(struct dnode_of_data *, unsigned int, blkcnt_t);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_get_block(struct dnode_of_data *, pgoff_t);
 ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 5f927ff..497e6e8 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -697,7 +697,7 @@ TRACE_EVENT(f2fs_direct_IO_exit,
 TRACE_EVENT(f2fs_reserve_new_blocks,
 
 	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
-						unsigned int count),
+							blkcnt_t count),
 
 	TP_ARGS(inode, nid, ofs_in_node, count),
 
@@ -705,7 +705,7 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
 		__field(dev_t,	dev)
 		__field(nid_t, nid)
 		__field(unsigned int, ofs_in_node)
-		__field(unsigned int, count)
+		__field(blkcnt_t, count)
 	),
 
 	TP_fast_assign(
@@ -715,11 +715,11 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
 		__entry->count = count;
 	),
 
-	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %u",
+	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
 		show_dev(__entry),
 		(unsigned int)__entry->nid,
 		__entry->ofs_in_node,
-		__entry->count)
+		(unsigned long long)__entry->count)
 );
 
 DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
-- 
2.6.3



> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  fs/f2fs/data.c              | 93 +++++++++++++++++++++++++++++++++------------
>  include/trace/events/f2fs.h | 14 ++++---
>  2 files changed, 78 insertions(+), 29 deletions(-)
> 
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 369d953..ea0abdc 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -278,6 +278,16 @@ alloc_new:
>  	trace_f2fs_submit_page_mbio(fio->page, fio);
>  }
>  
> +void __set_data_blkaddr(struct dnode_of_data *dn)
> +{
> +	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
> +	__le32 *addr_array;
> +
> +	/* Get physical address of data block */
> +	addr_array = blkaddr_in_node(rn);
> +	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
> +}
> +
>  /*
>   * Lock ordering for the change of data block address:
>   * ->data_page
> @@ -286,19 +296,9 @@ alloc_new:
>   */
>  void set_data_blkaddr(struct dnode_of_data *dn)
>  {
> -	struct f2fs_node *rn;
> -	__le32 *addr_array;
> -	struct page *node_page = dn->node_page;
> -	unsigned int ofs_in_node = dn->ofs_in_node;
> -
> -	f2fs_wait_on_page_writeback(node_page, NODE, true);
> -
> -	rn = F2FS_NODE(node_page);
> -
> -	/* Get physical address of data block */
> -	addr_array = blkaddr_in_node(rn);
> -	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
> -	if (set_page_dirty(node_page))
> +	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
> +	__set_data_blkaddr(dn);
> +	if (set_page_dirty(dn->node_page))
>  		dn->node_changed = true;
>  }
>  
> @@ -309,24 +309,53 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
>  	f2fs_update_extent_cache(dn);
>  }
>  
> -int reserve_new_block(struct dnode_of_data *dn)
> +int reserve_new_blocks(struct dnode_of_data *dn, unsigned int start,
> +							unsigned int count)
>  {
>  	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
> +	unsigned int ofs_in_node;
> +
> +	if (!count)
> +		return 0;
>  
>  	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
>  		return -EPERM;
> -	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
> +	if (unlikely(!inc_valid_block_count(sbi, dn->inode, count)))
>  		return -ENOSPC;
>  
> -	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
> +	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
> +						dn->ofs_in_node, count);
> +
> +	ofs_in_node = dn->ofs_in_node;
> +	dn->ofs_in_node = start;
> +
> +	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
> +
> +	for (; count > 0; dn->ofs_in_node++) {
> +		block_t blkaddr =
> +			datablock_addr(dn->node_page, dn->ofs_in_node);
> +		if (blkaddr == NULL_ADDR) {
> +			dn->data_blkaddr = NEW_ADDR;
> +			__set_data_blkaddr(dn);
> +			count--;
> +		}
> +	}
> +
> +	dn->ofs_in_node = ofs_in_node;
> +
> +	if (set_page_dirty(dn->node_page))
> +		dn->node_changed = true;
>  
> -	dn->data_blkaddr = NEW_ADDR;
> -	set_data_blkaddr(dn);
>  	mark_inode_dirty(dn->inode);
>  	sync_inode_page(dn);
>  	return 0;
>  }
>  
> +int reserve_new_block(struct dnode_of_data *dn)
> +{
> +	return reserve_new_blocks(dn, dn->ofs_in_node, 1);
> +}
> +
>  int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
>  {
>  	bool need_put = dn->inode_page ? false : true;
> @@ -621,8 +650,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
>  	struct dnode_of_data dn;
>  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>  	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
> -	pgoff_t pgofs, end_offset;
> -	int err = 0, ofs = 1;
> +	pgoff_t pgofs, end_offset, end;
> +	int err = 0, ofs = 1, prealloc, start;
>  	struct extent_info ei;
>  	bool allocated = false;
>  	block_t blkaddr;
> @@ -632,6 +661,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
>  
>  	/* it only supports block size == page size */
>  	pgofs =	(pgoff_t)map->m_lblk;
> +	end = pgofs + maxblocks;
>  
>  	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
>  		map->m_pblk = ei.blk + pgofs - ei.fofs;
> @@ -659,6 +689,8 @@ next_dnode:
>  		goto unlock_out;
>  	}
>  
> +	prealloc = 0;
> +	start = dn.ofs_in_node;
>  	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
>  
>  next_block:
> @@ -672,7 +704,7 @@ next_block:
>  			}
>  			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
>  				if (blkaddr == NULL_ADDR)
> -					err = reserve_new_block(&dn);
> +					prealloc++;
>  			} else {
>  				err = __allocate_data_block(&dn);
>  				if (!err)
> @@ -700,6 +732,9 @@ next_block:
>  		}
>  	}
>  
> +	if (flag == F2FS_GET_BLOCK_PRE_AIO)
> +		goto skip;
> +
>  	if (map->m_len == 0) {
>  		/* preallocated unwritten block should be mapped for fiemap. */
>  		if (blkaddr == NEW_ADDR)
> @@ -711,18 +746,28 @@ next_block:
>  	} else if ((map->m_pblk != NEW_ADDR &&
>  			blkaddr == (map->m_pblk + ofs)) ||
>  			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
> -			flag == F2FS_GET_BLOCK_PRE_DIO ||
> -			flag == F2FS_GET_BLOCK_PRE_AIO) {
> +			flag == F2FS_GET_BLOCK_PRE_DIO) {
>  		ofs++;
>  		map->m_len++;
>  	} else {
>  		goto sync_out;
>  	}
>  
> +skip:
>  	dn.ofs_in_node++;
>  	pgofs++;
>  
> -	if (map->m_len < maxblocks) {
> +	/* preallocate blocks in batch for one dnode page */
> +	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
> +			(pgofs == end || dn.ofs_in_node == end_offset)) {
> +		allocated = false;
> +		err = reserve_new_blocks(&dn, start, prealloc);
> +		if (err)
> +			goto sync_out;
> +		map->m_len = pgofs - start;
> +	}
> +
> +	if (pgofs < end) {
>  		if (dn.ofs_in_node < end_offset)
>  			goto next_block;
>  
> diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
> index 0f56584..5f927ff 100644
> --- a/include/trace/events/f2fs.h
> +++ b/include/trace/events/f2fs.h
> @@ -694,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit,
>  		__entry->ret)
>  );
>  
> -TRACE_EVENT(f2fs_reserve_new_block,
> +TRACE_EVENT(f2fs_reserve_new_blocks,
>  
> -	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
> +	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
> +						unsigned int count),
>  
> -	TP_ARGS(inode, nid, ofs_in_node),
> +	TP_ARGS(inode, nid, ofs_in_node, count),
>  
>  	TP_STRUCT__entry(
>  		__field(dev_t,	dev)
>  		__field(nid_t, nid)
>  		__field(unsigned int, ofs_in_node)
> +		__field(unsigned int, count)
>  	),
>  
>  	TP_fast_assign(
>  		__entry->dev	= inode->i_sb->s_dev;
>  		__entry->nid	= nid;
>  		__entry->ofs_in_node = ofs_in_node;
> +		__entry->count = count;
>  	),
>  
> -	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
> +	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %u",
>  		show_dev(__entry),
>  		(unsigned int)__entry->nid,
> -		__entry->ofs_in_node)
> +		__entry->ofs_in_node,
> +		__entry->count)
>  );
>  
>  DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
> -- 
> 2.8.2.311.gee88674


* Re: [PATCH 2/6] f2fs: support in batch fzero in dnode page
  2016-05-09 11:56 ` [PATCH 2/6] f2fs: support in batch fzero in dnode page Chao Yu
@ 2016-05-09 23:03   ` Jaegeuk Kim
  0 siblings, 0 replies; 17+ messages in thread
From: Jaegeuk Kim @ 2016-05-09 23:03 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel

Hi Chao,

Could you check this patch as well?
I slightly changed the routine to consider the ENOSPC case.

Thanks,

From 8b22fca4188319132d913a4d5f0d74c9ef676406 Mon Sep 17 00:00:00 2001
From: Chao Yu <yuchao0@huawei.com>
Date: Mon, 9 May 2016 19:56:31 +0800
Subject: [PATCH] f2fs: support in batch fzero in dnode page

This patch speeds up fzero_range by performing space preallocation and
block address removal for one dnode page as a batch operation.

In virtual machine, with zram driver:

dd if=/dev/zero of=/mnt/f2fs/file bs=1M count=4096
time xfs_io -f /mnt/f2fs/file -c "fzero 0 4096M"

Before:
real	0m3.276s
user	0m0.008s
sys	0m3.260s

After:
real	0m1.568s
user	0m0.000s
sys	0m1.564s

Signed-off-by: Chao Yu <yuchao0@huawei.com>
[Jaegeuk Kim: consider ENOSPC case]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/file.c | 70 ++++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 54 insertions(+), 16 deletions(-)

diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 05829ff..f54c3e2 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1035,6 +1035,47 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	return ret;
 }
 
+static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+								pgoff_t end)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+	pgoff_t index = start;
+	unsigned int ofs_in_node = dn->ofs_in_node;
+	blkcnt_t count = 0;
+	int ret;
+
+	for (; index < end; index++, ofs_in_node++) {
+		if (datablock_addr(dn->node_page, ofs_in_node) == NULL_ADDR)
+			count++;
+	}
+
+	ret = reserve_new_blocks(dn, dn->ofs_in_node, count);
+	if (ret)
+		return ret;
+
+	for (index = start; index < end; index++, dn->ofs_in_node++) {
+		dn->data_blkaddr =
+				datablock_addr(dn->node_page, dn->ofs_in_node);
+		/*
+		 * reserve_new_blocks will not guarantee entire block
+		 * allocation.
+		 */
+		if (dn->data_blkaddr == NULL_ADDR) {
+			ret = -ENOSPC;
+			break;
+		}
+		if (dn->data_blkaddr != NEW_ADDR) {
+			invalidate_blocks(sbi, dn->data_blkaddr);
+			dn->data_blkaddr = NEW_ADDR;
+			set_data_blkaddr(dn);
+		}
+	}
+
+	f2fs_update_extent_cache_range(dn, start, 0, index - start);
+
+	return ret;
+}
+
 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 								int mode)
 {
@@ -1085,35 +1126,32 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 					(loff_t)pg_start << PAGE_SHIFT);
 		}
 
-		for (index = pg_start; index < pg_end; index++) {
+		for (index = pg_start; index < pg_end;) {
 			struct dnode_of_data dn;
-			struct page *ipage;
+			unsigned int end_offset;
+			pgoff_t end;
 
 			f2fs_lock_op(sbi);
 
-			ipage = get_node_page(sbi, inode->i_ino);
-			if (IS_ERR(ipage)) {
-				ret = PTR_ERR(ipage);
-				f2fs_unlock_op(sbi);
-				goto out;
-			}
-
-			set_new_dnode(&dn, inode, ipage, NULL, 0);
-			ret = f2fs_reserve_block(&dn, index);
+			set_new_dnode(&dn, inode, NULL, NULL, 0);
+			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
 			if (ret) {
 				f2fs_unlock_op(sbi);
 				goto out;
 			}
 
-			if (dn.data_blkaddr != NEW_ADDR) {
-				invalidate_blocks(sbi, dn.data_blkaddr);
-				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
-			}
+			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+			end = min(pg_end, end_offset - dn.ofs_in_node + index);
+
+			ret = f2fs_do_zero_range(&dn, index, end);
 			f2fs_put_dnode(&dn);
 			f2fs_unlock_op(sbi);
+			if (ret)
+				goto out;
 
+			index = end;
 			new_size = max_t(loff_t, new_size,
-				(loff_t)(index + 1) << PAGE_SHIFT);
+					(loff_t)index << PAGE_SHIFT);
 		}
 
 		if (off_end) {
-- 
2.6.3

On Mon, May 09, 2016 at 07:56:31PM +0800, Chao Yu wrote:
> This patch tries to speedup fzero_range by making space preallocation and
> address removal of blocks in one dnode page as in batch operation.
> 
> In virtual machine, with zram driver:
> 
> dd if=/dev/zero of=/mnt/f2fs/file bs=1M count=4096
> time xfs_io -f /mnt/f2fs/file -c "fzero 0 4096M"
> 
> Before:
> real	0m3.276s
> user	0m0.008s
> sys	0m3.260s
> 
> After:
> real	0m1.568s
> user	0m0.000s
> sys	0m1.564s
> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  fs/f2fs/f2fs.h |  2 ++
>  fs/f2fs/file.c | 61 +++++++++++++++++++++++++++++++++++++++++++---------------
>  2 files changed, 47 insertions(+), 16 deletions(-)
> 
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 75b0084..f75cd65 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1963,8 +1963,10 @@ void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
>  void f2fs_flush_merged_bios(struct f2fs_sb_info *);
>  int f2fs_submit_page_bio(struct f2fs_io_info *);
>  void f2fs_submit_page_mbio(struct f2fs_io_info *);
> +void __set_data_blkaddr(struct dnode_of_data *);
>  void set_data_blkaddr(struct dnode_of_data *);
>  void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
> +int reserve_new_blocks(struct dnode_of_data *, unsigned int, unsigned int);
>  int reserve_new_block(struct dnode_of_data *);
>  int f2fs_get_block(struct dnode_of_data *, pgoff_t);
>  ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
> diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
> index 5ead254..d5910bc 100644
> --- a/fs/f2fs/file.c
> +++ b/fs/f2fs/file.c
> @@ -1035,6 +1035,38 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
>  	return ret;
>  }
>  
> +static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
> +								pgoff_t end)
> +{
> +	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
> +	pgoff_t index = start;
> +	unsigned int ofs_in_node = dn->ofs_in_node, count = 0;
> +	int ret;
> +
> +	for (; index < end; index++, ofs_in_node++) {
> +		if (datablock_addr(dn->node_page, ofs_in_node) == NULL_ADDR)
> +			count++;
> +	}
> +
> +	ret = reserve_new_blocks(dn, dn->ofs_in_node, count);
> +	if (ret)
> +		return ret;
> +
> +	for (index = start; index < end; index++, dn->ofs_in_node++) {
> +		dn->data_blkaddr =
> +				datablock_addr(dn->node_page, dn->ofs_in_node);
> +		if (dn->data_blkaddr != NEW_ADDR) {
> +			invalidate_blocks(sbi, dn->data_blkaddr);
> +			dn->data_blkaddr = NEW_ADDR;
> +			__set_data_blkaddr(dn);
> +		}
> +	}
> +
> +	f2fs_update_extent_cache_range(dn, start, 0, end - start);
> +
> +	return 0;
> +}
> +
>  static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
>  								int mode)
>  {
> @@ -1085,35 +1117,32 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
>  					(loff_t)pg_start << PAGE_SHIFT);
>  		}
>  
> -		for (index = pg_start; index < pg_end; index++) {
> +		for (index = pg_start; index < pg_end;) {
>  			struct dnode_of_data dn;
> -			struct page *ipage;
> +			unsigned int end_offset;
> +			pgoff_t end;
>  
>  			f2fs_lock_op(sbi);
>  
> -			ipage = get_node_page(sbi, inode->i_ino);
> -			if (IS_ERR(ipage)) {
> -				ret = PTR_ERR(ipage);
> -				f2fs_unlock_op(sbi);
> -				goto out;
> -			}
> -
> -			set_new_dnode(&dn, inode, ipage, NULL, 0);
> -			ret = f2fs_reserve_block(&dn, index);
> +			set_new_dnode(&dn, inode, NULL, NULL, 0);
> +			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
>  			if (ret) {
>  				f2fs_unlock_op(sbi);
>  				goto out;
>  			}
>  
> -			if (dn.data_blkaddr != NEW_ADDR) {
> -				invalidate_blocks(sbi, dn.data_blkaddr);
> -				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
> -			}
> +			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
> +			end = min(pg_end, end_offset - dn.ofs_in_node + index);
> +
> +			ret = f2fs_do_zero_range(&dn, index, end);
>  			f2fs_put_dnode(&dn);
>  			f2fs_unlock_op(sbi);
> +			if (ret)
> +				goto out;
>  
> +			index = end;
>  			new_size = max_t(loff_t, new_size,
> -				(loff_t)(index + 1) << PAGE_SHIFT);
> +					(loff_t)index << PAGE_SHIFT);
>  		}
>  
>  		if (off_end) {
> -- 
> 2.8.2.311.gee88674


* Re: [PATCH 5/6] f2fs: enable inline_dentry by default
  2016-05-09 11:56 ` [PATCH 5/6] f2fs: enable inline_dentry by default Chao Yu
@ 2016-05-09 23:04   ` Jaegeuk Kim
  2016-08-22  1:49     ` Chao Yu
  0 siblings, 1 reply; 17+ messages in thread
From: Jaegeuk Kim @ 2016-05-09 23:04 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel

On Mon, May 09, 2016 at 07:56:34PM +0800, Chao Yu wrote:
> Make inline_dentry as default mount option to improve space usage and
> IO performance in scenario of numerous small directory.

Hmm, I haven't tested this much so far.
Let me take some time to consider it.

Thanks,

> 
> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> ---
>  fs/f2fs/super.c | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> index 28c8992..4a4f4bd 100644
> --- a/fs/f2fs/super.c
> +++ b/fs/f2fs/super.c
> @@ -824,6 +824,7 @@ static void default_options(struct f2fs_sb_info *sbi)
>  
>  	set_opt(sbi, BG_GC);
>  	set_opt(sbi, INLINE_DATA);
> +	set_opt(sbi, INLINE_DENTRY);
>  	set_opt(sbi, EXTENT_CACHE);
>  
>  #ifdef CONFIG_F2FS_FS_XATTR
> -- 
> 2.8.2.311.gee88674


* Re: [PATCH 1/6] f2fs: support in batch multi blocks preallocation
  2016-05-09 23:00 ` [PATCH 1/6] f2fs: support in batch multi blocks preallocation Jaegeuk Kim
@ 2016-05-10 12:55   ` Chao Yu
  2016-05-10 21:41     ` Jaegeuk Kim
  0 siblings, 1 reply; 17+ messages in thread
From: Chao Yu @ 2016-05-10 12:55 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel

Hi Jaegeuk,

On 2016/5/10 7:00, Jaegeuk Kim wrote:
> Hi Chao,
> 
> On Mon, May 09, 2016 at 07:56:30PM +0800, Chao Yu wrote:
>> This patch introduces reserve_new_blocks to make preallocation of multi
>> blocks as in batch operation, so it can avoid lots of redundant
>> operation, result in better performance.
>>
>> In virtual machine, with rotational device:
>>
>> time fallocate -l 32G /mnt/f2fs/file
>>
>> Before:
>> real	0m4.584s
>> user	0m0.000s
>> sys	0m4.580s
>>
>> After:
>> real	0m0.292s
>> user	0m0.000s
>> sys	0m0.272s
> 
> It's cool.
> Let me add my test results as well.
> 
> In x86, with SSD:
> 
> time fallocate -l 500G $MNT/testfile
> 
> Before : 24.758 s
> After  :  1.604 s
> 
> By the way, there is one thing we should consider, which is the ENOSPC case.
> Could you check this out on top of your patch?
> 
> If you don't mind, let me integrate this into your patch.

No problem. :)

And please see my comments below.

> Let me know.
> 
> Thanks,
> 
> ---
>  fs/f2fs/data.c              |  9 +++++----
>  fs/f2fs/f2fs.h              | 20 +++++++++++++-------
>  include/trace/events/f2fs.h |  8 ++++----
>  3 files changed, 22 insertions(+), 15 deletions(-)
> 
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index ea0abdc..da640e1 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -278,7 +278,7 @@ alloc_new:
>  	trace_f2fs_submit_page_mbio(fio->page, fio);
>  }
>  
> -void __set_data_blkaddr(struct dnode_of_data *dn)
> +static void __set_data_blkaddr(struct dnode_of_data *dn)
>  {
>  	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
>  	__le32 *addr_array;
> @@ -310,7 +310,7 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
>  }
>  
>  int reserve_new_blocks(struct dnode_of_data *dn, unsigned int start,
> -							unsigned int count)
> +							blkcnt_t count)
>  {
>  	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
>  	unsigned int ofs_in_node;
> @@ -320,7 +320,7 @@ int reserve_new_blocks(struct dnode_of_data *dn, unsigned int start,
>  
>  	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
>  		return -EPERM;
> -	if (unlikely(!inc_valid_block_count(sbi, dn->inode, count)))
> +	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
>  		return -ENOSPC;
>  
>  	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
> @@ -574,6 +574,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
>  	struct node_info ni;
>  	int seg = CURSEG_WARM_DATA;
>  	pgoff_t fofs;
> +	blkcnt_t count = 1;
>  
>  	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
>  		return -EPERM;
> @@ -582,7 +583,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
>  	if (dn->data_blkaddr == NEW_ADDR)
>  		goto alloc;
>  
> -	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
> +	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
>  		return -ENOSPC;
>  
>  alloc:
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 75b0084..00fe63c 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1114,7 +1114,7 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
>  }
>  
>  static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
> -				 struct inode *inode, blkcnt_t count)
> +				 struct inode *inode, blkcnt_t *count)
>  {
>  	block_t	valid_block_count;
>  
> @@ -1126,14 +1126,19 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
>  	}
>  #endif
>  	valid_block_count =
> -		sbi->total_valid_block_count + (block_t)count;
> +		sbi->total_valid_block_count + (block_t)(*count);
>  	if (unlikely(valid_block_count > sbi->user_block_count)) {
> -		spin_unlock(&sbi->stat_lock);
> -		return false;
> +		*count = sbi->user_block_count - sbi->total_valid_block_count;
> +		if (!*count) {
> +			spin_unlock(&sbi->stat_lock);
> +			return false;
> +		}

If we can only allocate some of the requested blocks, we should let
f2fs_map_blocks be aware of that; otherwise, map->m_len will be updated
incorrectly.

Thanks,

>  	}
> -	inode->i_blocks += count;
> -	sbi->total_valid_block_count = valid_block_count;
> -	sbi->alloc_valid_block_count += (block_t)count;
> +	/* *count can be recalculated */
> +	inode->i_blocks += *count;
> +	sbi->total_valid_block_count =
> +		sbi->total_valid_block_count + (block_t)(*count);
> +	sbi->alloc_valid_block_count += (block_t)(*count);
>  	spin_unlock(&sbi->stat_lock);
>  	return true;
>  }
> @@ -1965,6 +1970,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *);
>  void f2fs_submit_page_mbio(struct f2fs_io_info *);
>  void set_data_blkaddr(struct dnode_of_data *);
>  void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
> +int reserve_new_blocks(struct dnode_of_data *, unsigned int, blkcnt_t);
>  int reserve_new_block(struct dnode_of_data *);
>  int f2fs_get_block(struct dnode_of_data *, pgoff_t);
>  ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
> diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
> index 5f927ff..497e6e8 100644
> --- a/include/trace/events/f2fs.h
> +++ b/include/trace/events/f2fs.h
> @@ -697,7 +697,7 @@ TRACE_EVENT(f2fs_direct_IO_exit,
>  TRACE_EVENT(f2fs_reserve_new_blocks,
>  
>  	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
> -						unsigned int count),
> +							blkcnt_t count),
>  
>  	TP_ARGS(inode, nid, ofs_in_node, count),
>  
> @@ -705,7 +705,7 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
>  		__field(dev_t,	dev)
>  		__field(nid_t, nid)
>  		__field(unsigned int, ofs_in_node)
> -		__field(unsigned int, count)
> +		__field(blkcnt_t, count)
>  	),
>  
>  	TP_fast_assign(
> @@ -715,11 +715,11 @@ TRACE_EVENT(f2fs_reserve_new_blocks,
>  		__entry->count = count;
>  	),
>  
> -	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %u",
> +	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
>  		show_dev(__entry),
>  		(unsigned int)__entry->nid,
>  		__entry->ofs_in_node,
> -		__entry->count)
> +		(unsigned long long)__entry->count)
>  );
>  
>  DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
> 


* Re: [PATCH 1/6] f2fs: support in batch multi blocks preallocation
  2016-05-10 12:55   ` Chao Yu
@ 2016-05-10 21:41     ` Jaegeuk Kim
  2016-05-11  2:22       ` Chao Yu
  0 siblings, 1 reply; 17+ messages in thread
From: Jaegeuk Kim @ 2016-05-10 21:41 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel

> >  	valid_block_count =
> > -		sbi->total_valid_block_count + (block_t)count;
> > +		sbi->total_valid_block_count + (block_t)(*count);
> >  	if (unlikely(valid_block_count > sbi->user_block_count)) {
> > -		spin_unlock(&sbi->stat_lock);
> > -		return false;
> > +		*count = sbi->user_block_count - sbi->total_valid_block_count;
> > +		if (!*count) {
> > +			spin_unlock(&sbi->stat_lock);
> > +			return false;
> > +		}
> 
> If we can only allocate partial blocks, we should let f2fs_map_blocks be
> aware of that; otherwise, map->m_len will be updated incorrectly.

Hmm, I've reworked this patch.
Can you review this?

Thanks,

>From c8706b30c2646082f1a1f66e363526bb3d6d8ee4 Mon Sep 17 00:00:00 2001
From: Chao Yu <yuchao0@huawei.com>
Date: Mon, 9 May 2016 19:56:30 +0800
Subject: [PATCH] f2fs: support in batch multi blocks preallocation

This patch introduces reserve_new_blocks to preallocate multiple blocks
in one batch operation, which avoids lots of redundant work and results
in better performance.

In virtual machine, with rotational device:

time fallocate -l 32G /mnt/f2fs/file

Before:
real	0m4.584s
user	0m0.000s
sys	0m4.580s

After:
real	0m0.292s
user	0m0.000s
sys	0m0.272s

In x86, with SSD:

time fallocate -l 500G $MNT/testfile

Before : 24.758 s
After  :  1.604 s

Signed-off-by: Chao Yu <yuchao0@huawei.com>
[Jaegeuk Kim: fix bugs and add performance numbers measured in x86.]
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
 fs/f2fs/data.c              | 108 +++++++++++++++++++++++++++++++++-----------
 fs/f2fs/f2fs.h              |  20 +++++---
 include/trace/events/f2fs.h |  14 ++++--
 3 files changed, 104 insertions(+), 38 deletions(-)

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 369d953..eb6ce31 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -278,6 +278,16 @@ alloc_new:
 	trace_f2fs_submit_page_mbio(fio->page, fio);
 }
 
+static void __set_data_blkaddr(struct dnode_of_data *dn)
+{
+	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+	__le32 *addr_array;
+
+	/* Get physical address of data block */
+	addr_array = blkaddr_in_node(rn);
+	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+}
+
 /*
  * Lock ordering for the change of data block address:
  * ->data_page
@@ -286,19 +296,9 @@ alloc_new:
  */
 void set_data_blkaddr(struct dnode_of_data *dn)
 {
-	struct f2fs_node *rn;
-	__le32 *addr_array;
-	struct page *node_page = dn->node_page;
-	unsigned int ofs_in_node = dn->ofs_in_node;
-
-	f2fs_wait_on_page_writeback(node_page, NODE, true);
-
-	rn = F2FS_NODE(node_page);
-
-	/* Get physical address of data block */
-	addr_array = blkaddr_in_node(rn);
-	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
-	if (set_page_dirty(node_page))
+	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+	__set_data_blkaddr(dn);
+	if (set_page_dirty(dn->node_page))
 		dn->node_changed = true;
 }
 
@@ -309,24 +309,53 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
 	f2fs_update_extent_cache(dn);
 }
 
-int reserve_new_block(struct dnode_of_data *dn)
+/* dn->ofs_in_node will be returned with up-to-date last block pointer */
+int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 
+	if (!count)
+		return 0;
+
 	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 		return -EPERM;
-	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
 		return -ENOSPC;
 
-	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
+	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+						dn->ofs_in_node, count);
+
+	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+
+	for (; count > 0; dn->ofs_in_node++) {
+		block_t blkaddr =
+			datablock_addr(dn->node_page, dn->ofs_in_node);
+		if (blkaddr == NULL_ADDR) {
+			dn->data_blkaddr = NEW_ADDR;
+			__set_data_blkaddr(dn);
+			count--;
+		}
+	}
+
+	if (set_page_dirty(dn->node_page))
+		dn->node_changed = true;
 
-	dn->data_blkaddr = NEW_ADDR;
-	set_data_blkaddr(dn);
 	mark_inode_dirty(dn->inode);
 	sync_inode_page(dn);
 	return 0;
 }
 
+/* Should keep dn->ofs_in_node unchanged */
+int reserve_new_block(struct dnode_of_data *dn)
+{
+	unsigned int ofs_in_node = dn->ofs_in_node;
+	int ret;
+
+	ret = reserve_new_blocks(dn, 1);
+	dn->ofs_in_node = ofs_in_node;
+	return ret;
+}
+
 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
 {
 	bool need_put = dn->inode_page ? false : true;
@@ -545,6 +574,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	struct node_info ni;
 	int seg = CURSEG_WARM_DATA;
 	pgoff_t fofs;
+	blkcnt_t count = 1;
 
 	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
 		return -EPERM;
@@ -553,7 +583,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	if (dn->data_blkaddr == NEW_ADDR)
 		goto alloc;
 
-	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
 		return -ENOSPC;
 
 alloc:
@@ -621,8 +651,10 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	struct dnode_of_data dn;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
-	pgoff_t pgofs, end_offset;
+	pgoff_t pgofs, end_offset, end;
 	int err = 0, ofs = 1;
+	unsigned int ofs_in_node;
+	blkcnt_t prealloc;
 	struct extent_info ei;
 	bool allocated = false;
 	block_t blkaddr;
@@ -632,6 +664,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 
 	/* it only supports block size == page size */
 	pgofs =	(pgoff_t)map->m_lblk;
+	end = pgofs + maxblocks;
 
 	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
 		map->m_pblk = ei.blk + pgofs - ei.fofs;
@@ -659,6 +692,8 @@ next_dnode:
 		goto unlock_out;
 	}
 
+	prealloc = 0;
+	ofs_in_node = dn.ofs_in_node;
 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 
 next_block:
@@ -672,16 +707,17 @@ next_block:
 			}
 			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
 				if (blkaddr == NULL_ADDR)
-					err = reserve_new_block(&dn);
+					prealloc++;
 			} else {
 				err = __allocate_data_block(&dn);
-				if (!err)
+				if (!err) {
 					set_inode_flag(F2FS_I(inode),
 							FI_APPEND_WRITE);
+					allocated = true;
+				}
 			}
 			if (err)
 				goto sync_out;
-			allocated = true;
 			map->m_flags = F2FS_MAP_NEW;
 			blkaddr = dn.data_blkaddr;
 		} else {
@@ -700,6 +736,9 @@ next_block:
 		}
 	}
 
+	if (flag == F2FS_GET_BLOCK_PRE_AIO)
+		goto skip;
+
 	if (map->m_len == 0) {
 		/* preallocated unwritten block should be mapped for fiemap. */
 		if (blkaddr == NEW_ADDR)
@@ -711,18 +750,35 @@ next_block:
 	} else if ((map->m_pblk != NEW_ADDR &&
 			blkaddr == (map->m_pblk + ofs)) ||
 			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
-			flag == F2FS_GET_BLOCK_PRE_DIO ||
-			flag == F2FS_GET_BLOCK_PRE_AIO) {
+			flag == F2FS_GET_BLOCK_PRE_DIO) {
 		ofs++;
 		map->m_len++;
 	} else {
 		goto sync_out;
 	}
 
+skip:
 	dn.ofs_in_node++;
 	pgofs++;
 
-	if (map->m_len < maxblocks) {
+	/* preallocate blocks in batch for one dnode page */
+	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
+			(pgofs == end || dn.ofs_in_node == end_offset)) {
+		unsigned int last_ofs_in_node = dn.ofs_in_node;
+
+		dn.ofs_in_node = ofs_in_node;
+		err = reserve_new_blocks(&dn, prealloc);
+		if (err)
+			goto sync_out;
+
+		map->m_len += dn.ofs_in_node - ofs_in_node;
+		if (prealloc && dn.ofs_in_node != last_ofs_in_node) {
+			err = -ENOSPC;
+			goto sync_out;
+		}
+	}
+
+	if (pgofs < end) {
 		if (dn.ofs_in_node < end_offset)
 			goto next_block;
 
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 75b0084..1c51f37 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1114,7 +1114,7 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
 }
 
 static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
-				 struct inode *inode, blkcnt_t count)
+				 struct inode *inode, blkcnt_t *count)
 {
 	block_t	valid_block_count;
 
@@ -1126,14 +1126,19 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
 	}
 #endif
 	valid_block_count =
-		sbi->total_valid_block_count + (block_t)count;
+		sbi->total_valid_block_count + (block_t)(*count);
 	if (unlikely(valid_block_count > sbi->user_block_count)) {
-		spin_unlock(&sbi->stat_lock);
-		return false;
+		*count = sbi->user_block_count - sbi->total_valid_block_count;
+		if (!*count) {
+			spin_unlock(&sbi->stat_lock);
+			return false;
+		}
 	}
-	inode->i_blocks += count;
-	sbi->total_valid_block_count = valid_block_count;
-	sbi->alloc_valid_block_count += (block_t)count;
+	/* *count can be recalculated */
+	inode->i_blocks += *count;
+	sbi->total_valid_block_count =
+		sbi->total_valid_block_count + (block_t)(*count);
+	sbi->alloc_valid_block_count += (block_t)(*count);
 	spin_unlock(&sbi->stat_lock);
 	return true;
 }
@@ -1965,6 +1970,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *);
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
+int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_get_block(struct dnode_of_data *, pgoff_t);
 ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 0f56584..497e6e8 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -694,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit,
 		__entry->ret)
 );
 
-TRACE_EVENT(f2fs_reserve_new_block,
+TRACE_EVENT(f2fs_reserve_new_blocks,
 
-	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+	TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
+							blkcnt_t count),
 
-	TP_ARGS(inode, nid, ofs_in_node),
+	TP_ARGS(inode, nid, ofs_in_node, count),
 
 	TP_STRUCT__entry(
 		__field(dev_t,	dev)
 		__field(nid_t, nid)
 		__field(unsigned int, ofs_in_node)
+		__field(blkcnt_t, count)
 	),
 
 	TP_fast_assign(
 		__entry->dev	= inode->i_sb->s_dev;
 		__entry->nid	= nid;
 		__entry->ofs_in_node = ofs_in_node;
+		__entry->count = count;
 	),
 
-	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
+	TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
 		show_dev(__entry),
 		(unsigned int)__entry->nid,
-		__entry->ofs_in_node)
+		__entry->ofs_in_node,
+		(unsigned long long)__entry->count)
 );
 
 DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
-- 
2.6.3

* Re: [PATCH 1/6] f2fs: support in batch multi blocks preallocation
  2016-05-10 21:41     ` Jaegeuk Kim
@ 2016-05-11  2:22       ` Chao Yu
  2016-05-11  2:32         ` Jaegeuk Kim
  0 siblings, 1 reply; 17+ messages in thread
From: Chao Yu @ 2016-05-11  2:22 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel

On 2016/5/11 5:41, Jaegeuk Kim wrote:
> +
> +	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
> +
> +	for (; count > 0; dn->ofs_in_node++) {
> +		block_t blkaddr =
> +			datablock_addr(dn->node_page, dn->ofs_in_node);
> +		if (blkaddr == NULL_ADDR) {
> +			dn->data_blkaddr = NEW_ADDR;
> +			__set_data_blkaddr(dn);
> +			count--;
> +		}
> +	}

Shouldn't we let ofs_in_node increase to the offset where blkaddr == NULL_ADDR
in the ENOSPC case, or to end_offset in the normal case?
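
For instance, the walk could be bounded by the dnode's end_offset, so that
dn->ofs_in_node stops at the first hole that cannot be filled in the ENOSPC
case and reaches end_offset in the normal case. A rough sketch of that
reading (passing end_offset into the loop is an assumption for illustration,
not the merged fix):

	for (; dn->ofs_in_node < end_offset; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);

		if (blkaddr != NULL_ADDR)
			continue;
		if (!count)
			break;	/* ENOSPC: stop at the hole we cannot fill */
		dn->data_blkaddr = NEW_ADDR;
		__set_data_blkaddr(dn);
		count--;
	}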

Thanks,

* Re: [PATCH 1/6] f2fs: support in batch multi blocks preallocation
  2016-05-11  2:22       ` Chao Yu
@ 2016-05-11  2:32         ` Jaegeuk Kim
  2016-05-11  3:00           ` Chao Yu
  0 siblings, 1 reply; 17+ messages in thread
From: Jaegeuk Kim @ 2016-05-11  2:32 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel

On Wed, May 11, 2016 at 10:22:05AM +0800, Chao Yu wrote:
> On 2016/5/11 5:41, Jaegeuk Kim wrote:
> > +
> > +	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
> > +
> > +	for (; count > 0; dn->ofs_in_node++) {
> > +		block_t blkaddr =
> > +			datablock_addr(dn->node_page, dn->ofs_in_node);
> > +		if (blkaddr == NULL_ADDR) {
> > +			dn->data_blkaddr = NEW_ADDR;
> > +			__set_data_blkaddr(dn);
> > +			count--;
> > +		}
> > +	}
> 
> Shouldn't we let ofs_in_node increase to the offset where blkaddr == NULL_ADDR
> in the ENOSPC case, or to end_offset in the normal case?

hehe, I could get some errors on this patch. :)
Finally, I've made a patch which passes xfstests and fsstress.
Could you find the latest ones?

http://git.kernel.org/cgit/linux/kernel/git/jaegeuk/f2fs.git/log/?h=dev-test

> 
> Thanks,

* Re: [PATCH 1/6] f2fs: support in batch multi blocks preallocation
  2016-05-11  2:32         ` Jaegeuk Kim
@ 2016-05-11  3:00           ` Chao Yu
  0 siblings, 0 replies; 17+ messages in thread
From: Chao Yu @ 2016-05-11  3:00 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel

On 2016/5/11 10:32, Jaegeuk Kim wrote:
> On Wed, May 11, 2016 at 10:22:05AM +0800, Chao Yu wrote:
>> On 2016/5/11 5:41, Jaegeuk Kim wrote:
>>> +
>>> +	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
>>> +
>>> +	for (; count > 0; dn->ofs_in_node++) {
>>> +		block_t blkaddr =
>>> +			datablock_addr(dn->node_page, dn->ofs_in_node);
>>> +		if (blkaddr == NULL_ADDR) {
>>> +			dn->data_blkaddr = NEW_ADDR;
>>> +			__set_data_blkaddr(dn);
>>> +			count--;
>>> +		}
>>> +	}
>>
>> Shouldn't we let ofs_in_node increase to the offset where blkaddr == NULL_ADDR
>> in the ENOSPC case, or to end_offset in the normal case?
> 
> hehe, I could get some errors on this patch. :)
> Finally, I've made a patch which passes xfstests and fsstress.
> Could you find the latest ones?

OK, I will check them.
Thanks for your rework. :)

Thanks,

> 
> http://git.kernel.org/cgit/linux/kernel/git/jaegeuk/f2fs.git/log/?h=dev-test
> 
>>
>> Thanks,
> .
> 

* Re: [PATCH 5/6] f2fs: enable inline_dentry by default
  2016-05-09 23:04   ` Jaegeuk Kim
@ 2016-08-22  1:49     ` Chao Yu
  2016-08-23 16:57       ` Jaegeuk Kim
  0 siblings, 1 reply; 17+ messages in thread
From: Chao Yu @ 2016-08-22  1:49 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel

Hi Jaegeuk,

On 2016/5/10 7:04, Jaegeuk Kim wrote:
> On Mon, May 09, 2016 at 07:56:34PM +0800, Chao Yu wrote:
>> Make inline_dentry a default mount option to improve space usage and
>> IO performance in scenarios with numerous small directories.
> 
> Hmm, I've not much tested this so far.
> Let me take time to consider this for a while.

IMO, this feature is almost stable since I have fixed most of the bugs which
occurred during inline conversion. I now enable this feature by default when I
run the fstest suite and fsstress, and I haven't found any more bugs reported
by those test tools.

What do you think about enabling inline_dentry by default now?

Thanks,

> 
> Thanks,
> 
>>
>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>> ---
>>  fs/f2fs/super.c | 1 +
>>  1 file changed, 1 insertion(+)
>>
>> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
>> index 28c8992..4a4f4bd 100644
>> --- a/fs/f2fs/super.c
>> +++ b/fs/f2fs/super.c
>> @@ -824,6 +824,7 @@ static void default_options(struct f2fs_sb_info *sbi)
>>  
>>  	set_opt(sbi, BG_GC);
>>  	set_opt(sbi, INLINE_DATA);
>> +	set_opt(sbi, INLINE_DENTRY);
>>  	set_opt(sbi, EXTENT_CACHE);
>>  
>>  #ifdef CONFIG_F2FS_FS_XATTR
>> -- 
>> 2.8.2.311.gee88674
> .
> 

* Re: [PATCH 5/6] f2fs: enable inline_dentry by default
  2016-08-22  1:49     ` Chao Yu
@ 2016-08-23 16:57       ` Jaegeuk Kim
  2016-08-25  9:23         ` Chao Yu
  0 siblings, 1 reply; 17+ messages in thread
From: Jaegeuk Kim @ 2016-08-23 16:57 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-f2fs-devel, linux-kernel

Hi Chao,

On Mon, Aug 22, 2016 at 09:49:13AM +0800, Chao Yu wrote:
> Hi Jaegeuk,
> 
> On 2016/5/10 7:04, Jaegeuk Kim wrote:
> > On Mon, May 09, 2016 at 07:56:34PM +0800, Chao Yu wrote:
> >> Make inline_dentry a default mount option to improve space usage and
> >> IO performance in scenarios with numerous small directories.
> > 
> > Hmm, I've not much tested this so far.
> > Let me take time to consider this for a while.
> 
> IMO, this feature is almost stable since I have fixed most of the bugs which
> occurred during inline conversion. I now enable this feature by default when I
> run the fstest suite and fsstress, and I haven't found any more bugs reported
> by those test tools.
> 
> What do you think about enabling inline_dentry by default now?

Okay, let me start all my tests with this. :)

> 
> Thanks,
> 
> > 
> > Thanks,
> > 
> >>
> >> Signed-off-by: Chao Yu <yuchao0@huawei.com>
> >> ---
> >>  fs/f2fs/super.c | 1 +
> >>  1 file changed, 1 insertion(+)
> >>
> >> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> >> index 28c8992..4a4f4bd 100644
> >> --- a/fs/f2fs/super.c
> >> +++ b/fs/f2fs/super.c
> >> @@ -824,6 +824,7 @@ static void default_options(struct f2fs_sb_info *sbi)
> >>  
> >>  	set_opt(sbi, BG_GC);
> >>  	set_opt(sbi, INLINE_DATA);
> >> +	set_opt(sbi, INLINE_DENTRY);
> >>  	set_opt(sbi, EXTENT_CACHE);
> >>  
> >>  #ifdef CONFIG_F2FS_FS_XATTR
> >> -- 
> >> 2.8.2.311.gee88674
> > .
> > 

* Re: [PATCH 5/6] f2fs: enable inline_dentry by default
  2016-08-23 16:57       ` Jaegeuk Kim
@ 2016-08-25  9:23         ` Chao Yu
  0 siblings, 0 replies; 17+ messages in thread
From: Chao Yu @ 2016-08-25  9:23 UTC (permalink / raw)
  To: Jaegeuk Kim; +Cc: linux-f2fs-devel, linux-kernel

Hi Jaegeuk,

On 2016/8/24 0:57, Jaegeuk Kim wrote:
> Hi Chao,
> 
> On Mon, Aug 22, 2016 at 09:49:13AM +0800, Chao Yu wrote:
>> Hi Jaegeuk,
>>
>> On 2016/5/10 7:04, Jaegeuk Kim wrote:
>>> On Mon, May 09, 2016 at 07:56:34PM +0800, Chao Yu wrote:
>>>> Make inline_dentry a default mount option to improve space usage and
>>>> IO performance in scenarios with numerous small directories.
>>>
>>> Hmm, I've not much tested this so far.
>>> Let me take time to consider this for a while.
>>
>> IMO, this feature is almost stable since I have fixed most of the bugs which
>> occurred during inline conversion. I now enable this feature by default when I
>> run the fstest suite and fsstress, and I haven't found any more bugs reported
>> by those test tools.
>>
>> What do you think about enabling inline_dentry by default now?
> 
> Okay, let me start all my tests with this. :)

Cool, thanks for your support. ;)

Thanks,

> 
>>
>> Thanks,
>>
>>>
>>> Thanks,
>>>
>>>>
>>>> Signed-off-by: Chao Yu <yuchao0@huawei.com>
>>>> ---
>>>>  fs/f2fs/super.c | 1 +
>>>>  1 file changed, 1 insertion(+)
>>>>
>>>> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
>>>> index 28c8992..4a4f4bd 100644
>>>> --- a/fs/f2fs/super.c
>>>> +++ b/fs/f2fs/super.c
>>>> @@ -824,6 +824,7 @@ static void default_options(struct f2fs_sb_info *sbi)
>>>>  
>>>>  	set_opt(sbi, BG_GC);
>>>>  	set_opt(sbi, INLINE_DATA);
>>>> +	set_opt(sbi, INLINE_DENTRY);
>>>>  	set_opt(sbi, EXTENT_CACHE);
>>>>  
>>>>  #ifdef CONFIG_F2FS_FS_XATTR
>>>> -- 
>>>> 2.8.2.311.gee88674
>>> .
>>>
> 
> .
> 

end of thread, other threads:[~2016-08-25  9:23 UTC | newest]

Thread overview: 17+ messages
2016-05-09 11:56 [PATCH 1/6] f2fs: support in batch multi blocks preallocation Chao Yu
2016-05-09 11:56 ` [PATCH 2/6] f2fs: support in batch fzero in dnode page Chao Yu
2016-05-09 23:03   ` Jaegeuk Kim
2016-05-09 11:56 ` [PATCH 3/6] f2fs: use mnt_{want,drop}_write_file in ioctl Chao Yu
2016-05-09 11:56 ` [PATCH 4/6] f2fs: make atomic/volatile operation exclusive Chao Yu
2016-05-09 11:56 ` [PATCH 5/6] f2fs: enable inline_dentry by default Chao Yu
2016-05-09 23:04   ` Jaegeuk Kim
2016-08-22  1:49     ` Chao Yu
2016-08-23 16:57       ` Jaegeuk Kim
2016-08-25  9:23         ` Chao Yu
2016-05-09 11:56 ` [PATCH 6/6] f2fs: add noinline_dentry mount option Chao Yu
2016-05-09 23:00 ` [PATCH 1/6] f2fs: support in batch multi blocks preallocation Jaegeuk Kim
2016-05-10 12:55   ` Chao Yu
2016-05-10 21:41     ` Jaegeuk Kim
2016-05-11  2:22       ` Chao Yu
2016-05-11  2:32         ` Jaegeuk Kim
2016-05-11  3:00           ` Chao Yu
