linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
@ 2013-11-22  7:48 Chao Yu
  2013-11-27  5:29 ` Jaegeuk Kim
  0 siblings, 1 reply; 8+ messages in thread
From: Chao Yu @ 2013-11-22  7:48 UTC (permalink / raw)
  To: ???; +Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel, 谭姝

If cp has no CP_UMOUNT_FLAG, we will read all pages in the whole node segment 
one by one, which results in low performance. So let's merge contiguous pages 
and readahead them for better performance.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
---
 fs/f2fs/node.c |   89 +++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 63 insertions(+), 26 deletions(-)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 4ac4150..81e704a 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1572,47 +1572,84 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 	return 0;
 }
 
+/*
+ * ra_sum_pages() merge contiguous pages into one bio and submit.
+ * these pre-readed pages are linked in pages list.
+ */
+static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
+				int start, int nrpages)
+{
+	struct page *page;
+	int page_idx = start;
+
+	for (; page_idx < start + nrpages; page_idx++) {
+		/* alloc temporal page for read node summary info*/
+		page = alloc_page(GFP_NOFS | __GFP_ZERO);
+		if (!page) {
+			struct page *tmp;
+			list_for_each_entry_safe(page, tmp, pages, lru) {
+				list_del(&page->lru);
+				unlock_page(page);
+				__free_pages(page, 0);
+			}
+			return -ENOMEM;
+		}
+
+		lock_page(page);
+		page->index = page_idx;
+		list_add_tail(&page->lru, pages);
+	}
+
+	list_for_each_entry(page, pages, lru)
+		submit_read_page(sbi, page, page->index, READ_SYNC);
+
+	f2fs_submit_read_bio(sbi, READ_SYNC);
+	return 0;
+}
+
 int restore_node_summary(struct f2fs_sb_info *sbi,
 			unsigned int segno, struct f2fs_summary_block *sum)
 {
 	struct f2fs_node *rn;
 	struct f2fs_summary *sum_entry;
-	struct page *page;
+	struct page *page, *tmp;
 	block_t addr;
-	int i, last_offset;
-
-	/* alloc temporal page for read node */
-	page = alloc_page(GFP_NOFS | __GFP_ZERO);
-	if (!page)
-		return -ENOMEM;
-	lock_page(page);
+	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+	int i, last_offset, nrpages, err = 0;
+	LIST_HEAD(page_list);
 
 	/* scan the node segment */
 	last_offset = sbi->blocks_per_seg;
 	addr = START_BLOCK(sbi, segno);
 	sum_entry = &sum->entries[0];
 
-	for (i = 0; i < last_offset; i++, sum_entry++) {
-		/*
-		 * In order to read next node page,
-		 * we must clear PageUptodate flag.
-		 */
-		ClearPageUptodate(page);
+	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
 
-		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
-			goto out;
+		nrpages = min(last_offset - i, bio_blocks);
+		/* read ahead node pages */
+		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
+		if (err)
+			return err;
 
-		lock_page(page);
-		rn = F2FS_NODE(page);
-		sum_entry->nid = rn->footer.nid;
-		sum_entry->version = 0;
-		sum_entry->ofs_in_node = 0;
-		addr++;
+		list_for_each_entry_safe(page, tmp, &page_list, lru) {
+
+			lock_page(page);
+			if(PageUptodate(page)) {
+				rn = F2FS_NODE(page);
+				sum_entry->nid = rn->footer.nid;
+				sum_entry->version = 0;
+				sum_entry->ofs_in_node = 0;
+				sum_entry++;
+			} else {
+				err = -EIO;
+			}
+
+			list_del(&page->lru);
+			unlock_page(page);
+			__free_pages(page, 0);
+		}
 	}
-	unlock_page(page);
-out:
-	__free_pages(page, 0);
-	return 0;
+	return err;
 }
 
 static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
-- 
1.7.9.5


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
  2013-11-22  7:48 [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary Chao Yu
@ 2013-11-27  5:29 ` Jaegeuk Kim
  2013-11-27  7:58   ` Chao Yu
  0 siblings, 1 reply; 8+ messages in thread
From: Jaegeuk Kim @ 2013-11-27  5:29 UTC (permalink / raw)
  To: Chao Yu; +Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel, 谭姝

Hi Chao,

It seems that we already have a readahead function for node pages,
ra_node_page().
So, we don't need to make a page list for this; we can use the node_inode's
page cache instead.

So how about writing ra_node_pages() which use the node_inode's page
cache?

Thanks,

2013-11-22 (금), 15:48 +0800, Chao Yu:
> If cp has no CP_UMOUNT_FLAG, we will read all pages in whole node segment 
> one by one, it makes low performance. So let's merge contiguous pages and 
> readahead for better performance.
> 
> Signed-off-by: Chao Yu <chao2.yu@samsung.com>
> ---
>  fs/f2fs/node.c |   89 +++++++++++++++++++++++++++++++++++++++-----------------
>  1 file changed, 63 insertions(+), 26 deletions(-)
> 
> diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> index 4ac4150..81e704a 100644
> --- a/fs/f2fs/node.c
> +++ b/fs/f2fs/node.c
> @@ -1572,47 +1572,84 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
>  	return 0;
>  }
>  
> +/*
> + * ra_sum_pages() merge contiguous pages into one bio and submit.
> + * these pre-readed pages are linked in pages list.
> + */
> +static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> +				int start, int nrpages)
> +{
> +	struct page *page;
> +	int page_idx = start;
> +
> +	for (; page_idx < start + nrpages; page_idx++) {
> +		/* alloc temporal page for read node summary info*/
> +		page = alloc_page(GFP_NOFS | __GFP_ZERO);
> +		if (!page) {
> +			struct page *tmp;
> +			list_for_each_entry_safe(page, tmp, pages, lru) {
> +				list_del(&page->lru);
> +				unlock_page(page);
> +				__free_pages(page, 0);
> +			}
> +			return -ENOMEM;
> +		}
> +
> +		lock_page(page);
> +		page->index = page_idx;
> +		list_add_tail(&page->lru, pages);
> +	}
> +
> +	list_for_each_entry(page, pages, lru)
> +		submit_read_page(sbi, page, page->index, READ_SYNC);
> +
> +	f2fs_submit_read_bio(sbi, READ_SYNC);
> +	return 0;
> +}
> +
>  int restore_node_summary(struct f2fs_sb_info *sbi,
>  			unsigned int segno, struct f2fs_summary_block *sum)
>  {
>  	struct f2fs_node *rn;
>  	struct f2fs_summary *sum_entry;
> -	struct page *page;
> +	struct page *page, *tmp;
>  	block_t addr;
> -	int i, last_offset;
> -
> -	/* alloc temporal page for read node */
> -	page = alloc_page(GFP_NOFS | __GFP_ZERO);
> -	if (!page)
> -		return -ENOMEM;
> -	lock_page(page);
> +	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
> +	int i, last_offset, nrpages, err = 0;
> +	LIST_HEAD(page_list);
>  
>  	/* scan the node segment */
>  	last_offset = sbi->blocks_per_seg;
>  	addr = START_BLOCK(sbi, segno);
>  	sum_entry = &sum->entries[0];
>  
> -	for (i = 0; i < last_offset; i++, sum_entry++) {
> -		/*
> -		 * In order to read next node page,
> -		 * we must clear PageUptodate flag.
> -		 */
> -		ClearPageUptodate(page);
> +	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
>  
> -		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
> -			goto out;
> +		nrpages = min(last_offset - i, bio_blocks);
> +		/* read ahead node pages */
> +		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> +		if (err)
> +			return err;
>  
> -		lock_page(page);
> -		rn = F2FS_NODE(page);
> -		sum_entry->nid = rn->footer.nid;
> -		sum_entry->version = 0;
> -		sum_entry->ofs_in_node = 0;
> -		addr++;
> +		list_for_each_entry_safe(page, tmp, &page_list, lru) {
> +
> +			lock_page(page);
> +			if(PageUptodate(page)) {
> +				rn = F2FS_NODE(page);
> +				sum_entry->nid = rn->footer.nid;
> +				sum_entry->version = 0;
> +				sum_entry->ofs_in_node = 0;
> +				sum_entry++;
> +			} else {
> +				err = -EIO;
> +			}
> +
> +			list_del(&page->lru);
> +			unlock_page(page);
> +			__free_pages(page, 0);
> +		}
>  	}
> -	unlock_page(page);
> -out:
> -	__free_pages(page, 0);
> -	return 0;
> +	return err;
>  }
>  
>  static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)

-- 
Jaegeuk Kim
Samsung


^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
  2013-11-27  5:29 ` Jaegeuk Kim
@ 2013-11-27  7:58   ` Chao Yu
  2013-11-27  8:19     ` Jaegeuk Kim
  0 siblings, 1 reply; 8+ messages in thread
From: Chao Yu @ 2013-11-27  7:58 UTC (permalink / raw)
  To: jaegeuk.kim
  Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel,
	'谭姝'

Hi Kim,

> -----Original Message-----
> From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> Sent: Wednesday, November 27, 2013 1:30 PM
> To: Chao Yu
> Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; 谭姝
> Subject: Re: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> 
> Hi Chao,
> 
> It seems that we already have a readahed function for node pages,
> ra_node_page().
> So, we don't make a page list for this, but can use the node_inode's
> page cache.

So you mean it's a waste to release the page list with the updated data after
we finish the work in restore_node_summary, right?

> 
> So how about writing ra_node_pages() which use the node_inode's page
> cache?

Hmm, so ra_node_pages is introduced for reading node_inode's pages which are 
logically contiguous? And could it also take the place of ra_node_page?


> 
> Thanks,
> 
> 2013-11-22 (금), 15:48 +0800, Chao Yu:
> > If cp has no CP_UMOUNT_FLAG, we will read all pages in whole node segment
> > one by one, it makes low performance. So let's merge contiguous pages and
> > readahead for better performance.
> >
> > Signed-off-by: Chao Yu <chao2.yu@samsung.com>
> > ---
> >  fs/f2fs/node.c |   89 +++++++++++++++++++++++++++++++++++++++-----------------
> >  1 file changed, 63 insertions(+), 26 deletions(-)
> >
> > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> > index 4ac4150..81e704a 100644
> > --- a/fs/f2fs/node.c
> > +++ b/fs/f2fs/node.c
> > @@ -1572,47 +1572,84 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
> >  	return 0;
> >  }
> >
> > +/*
> > + * ra_sum_pages() merge contiguous pages into one bio and submit.
> > + * these pre-readed pages are linked in pages list.
> > + */
> > +static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > +				int start, int nrpages)
> > +{
> > +	struct page *page;
> > +	int page_idx = start;
> > +
> > +	for (; page_idx < start + nrpages; page_idx++) {
> > +		/* alloc temporal page for read node summary info*/
> > +		page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > +		if (!page) {
> > +			struct page *tmp;
> > +			list_for_each_entry_safe(page, tmp, pages, lru) {
> > +				list_del(&page->lru);
> > +				unlock_page(page);
> > +				__free_pages(page, 0);
> > +			}
> > +			return -ENOMEM;
> > +		}
> > +
> > +		lock_page(page);
> > +		page->index = page_idx;
> > +		list_add_tail(&page->lru, pages);
> > +	}
> > +
> > +	list_for_each_entry(page, pages, lru)
> > +		submit_read_page(sbi, page, page->index, READ_SYNC);
> > +
> > +	f2fs_submit_read_bio(sbi, READ_SYNC);
> > +	return 0;
> > +}
> > +
> >  int restore_node_summary(struct f2fs_sb_info *sbi,
> >  			unsigned int segno, struct f2fs_summary_block *sum)
> >  {
> >  	struct f2fs_node *rn;
> >  	struct f2fs_summary *sum_entry;
> > -	struct page *page;
> > +	struct page *page, *tmp;
> >  	block_t addr;
> > -	int i, last_offset;
> > -
> > -	/* alloc temporal page for read node */
> > -	page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > -	if (!page)
> > -		return -ENOMEM;
> > -	lock_page(page);
> > +	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
> > +	int i, last_offset, nrpages, err = 0;
> > +	LIST_HEAD(page_list);
> >
> >  	/* scan the node segment */
> >  	last_offset = sbi->blocks_per_seg;
> >  	addr = START_BLOCK(sbi, segno);
> >  	sum_entry = &sum->entries[0];
> >
> > -	for (i = 0; i < last_offset; i++, sum_entry++) {
> > -		/*
> > -		 * In order to read next node page,
> > -		 * we must clear PageUptodate flag.
> > -		 */
> > -		ClearPageUptodate(page);
> > +	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
> >
> > -		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
> > -			goto out;
> > +		nrpages = min(last_offset - i, bio_blocks);
> > +		/* read ahead node pages */
> > +		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > +		if (err)
> > +			return err;
> >
> > -		lock_page(page);
> > -		rn = F2FS_NODE(page);
> > -		sum_entry->nid = rn->footer.nid;
> > -		sum_entry->version = 0;
> > -		sum_entry->ofs_in_node = 0;
> > -		addr++;
> > +		list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > +
> > +			lock_page(page);
> > +			if(PageUptodate(page)) {
> > +				rn = F2FS_NODE(page);
> > +				sum_entry->nid = rn->footer.nid;
> > +				sum_entry->version = 0;
> > +				sum_entry->ofs_in_node = 0;
> > +				sum_entry++;
> > +			} else {
> > +				err = -EIO;
> > +			}
> > +
> > +			list_del(&page->lru);
> > +			unlock_page(page);
> > +			__free_pages(page, 0);
> > +		}
> >  	}
> > -	unlock_page(page);
> > -out:
> > -	__free_pages(page, 0);
> > -	return 0;
> > +	return err;
> >  }
> >
> >  static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
> 
> --
> Jaegeuk Kim
> Samsung


^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
  2013-11-27  7:58   ` Chao Yu
@ 2013-11-27  8:19     ` Jaegeuk Kim
  2013-11-28  1:26       ` Chao Yu
  0 siblings, 1 reply; 8+ messages in thread
From: Jaegeuk Kim @ 2013-11-27  8:19 UTC (permalink / raw)
  To: Chao Yu
  Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel,
	'谭姝'

Hi,

2013-11-27 (수), 15:58 +0800, Chao Yu:
> Hi Kim,
> 
> > -----Original Message-----
> > From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> > Sent: Wednesday, November 27, 2013 1:30 PM
> > To: Chao Yu
> > Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; 谭姝
> > Subject: Re: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> > 
> > Hi Chao,
> > 
> > It seems that we already have a readahed function for node pages,
> > ra_node_page().
> > So, we don't make a page list for this, but can use the node_inode's
> > page cache.
> 
> So you mean it's waste to release page list with updated data after we
> finish work in restore_node_summary, right?

Right.

> 
> > 
> > So how about writing ra_node_pages() which use the node_inode's page
> > cache?
> 
> Hmm, so ra_node_pages is introduced for read node_inode's pages which are 
> logical contiguously? and it also could take place of ra_node_page?

Ah. The ra_node_page() reads a node page ahead for a given node id.
So it doesn't match exactly between ra_node_page() and ra_node_pages()
that I suggested.
So how about reading node pages and then caching some of them in the
page cache, node_inode's address space?

Thanks,

> 
> 
> > 
> > Thanks,
> > 
> > 2013-11-22 (금), 15:48 +0800, Chao Yu:
> > > If cp has no CP_UMOUNT_FLAG, we will read all pages in whole node segment
> > > one by one, it makes low performance. So let's merge contiguous pages and
> > > readahead for better performance.
> > >
> > > Signed-off-by: Chao Yu <chao2.yu@samsung.com>
> > > ---
> > >  fs/f2fs/node.c |   89 +++++++++++++++++++++++++++++++++++++++-----------------
> > >  1 file changed, 63 insertions(+), 26 deletions(-)
> > >
> > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> > > index 4ac4150..81e704a 100644
> > > --- a/fs/f2fs/node.c
> > > +++ b/fs/f2fs/node.c
> > > @@ -1572,47 +1572,84 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
> > >  	return 0;
> > >  }
> > >
> > > +/*
> > > + * ra_sum_pages() merge contiguous pages into one bio and submit.
> > > + * these pre-readed pages are linked in pages list.
> > > + */
> > > +static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > > +				int start, int nrpages)
> > > +{
> > > +	struct page *page;
> > > +	int page_idx = start;
> > > +
> > > +	for (; page_idx < start + nrpages; page_idx++) {
> > > +		/* alloc temporal page for read node summary info*/
> > > +		page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > > +		if (!page) {
> > > +			struct page *tmp;
> > > +			list_for_each_entry_safe(page, tmp, pages, lru) {
> > > +				list_del(&page->lru);
> > > +				unlock_page(page);
> > > +				__free_pages(page, 0);
> > > +			}
> > > +			return -ENOMEM;
> > > +		}
> > > +
> > > +		lock_page(page);
> > > +		page->index = page_idx;
> > > +		list_add_tail(&page->lru, pages);
> > > +	}
> > > +
> > > +	list_for_each_entry(page, pages, lru)
> > > +		submit_read_page(sbi, page, page->index, READ_SYNC);
> > > +
> > > +	f2fs_submit_read_bio(sbi, READ_SYNC);
> > > +	return 0;
> > > +}
> > > +
> > >  int restore_node_summary(struct f2fs_sb_info *sbi,
> > >  			unsigned int segno, struct f2fs_summary_block *sum)
> > >  {
> > >  	struct f2fs_node *rn;
> > >  	struct f2fs_summary *sum_entry;
> > > -	struct page *page;
> > > +	struct page *page, *tmp;
> > >  	block_t addr;
> > > -	int i, last_offset;
> > > -
> > > -	/* alloc temporal page for read node */
> > > -	page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > > -	if (!page)
> > > -		return -ENOMEM;
> > > -	lock_page(page);
> > > +	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
> > > +	int i, last_offset, nrpages, err = 0;
> > > +	LIST_HEAD(page_list);
> > >
> > >  	/* scan the node segment */
> > >  	last_offset = sbi->blocks_per_seg;
> > >  	addr = START_BLOCK(sbi, segno);
> > >  	sum_entry = &sum->entries[0];
> > >
> > > -	for (i = 0; i < last_offset; i++, sum_entry++) {
> > > -		/*
> > > -		 * In order to read next node page,
> > > -		 * we must clear PageUptodate flag.
> > > -		 */
> > > -		ClearPageUptodate(page);
> > > +	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
> > >
> > > -		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
> > > -			goto out;
> > > +		nrpages = min(last_offset - i, bio_blocks);
> > > +		/* read ahead node pages */
> > > +		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > > +		if (err)
> > > +			return err;
> > >
> > > -		lock_page(page);
> > > -		rn = F2FS_NODE(page);
> > > -		sum_entry->nid = rn->footer.nid;
> > > -		sum_entry->version = 0;
> > > -		sum_entry->ofs_in_node = 0;
> > > -		addr++;
> > > +		list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > > +
> > > +			lock_page(page);
> > > +			if(PageUptodate(page)) {
> > > +				rn = F2FS_NODE(page);
> > > +				sum_entry->nid = rn->footer.nid;
> > > +				sum_entry->version = 0;
> > > +				sum_entry->ofs_in_node = 0;
> > > +				sum_entry++;
> > > +			} else {
> > > +				err = -EIO;
> > > +			}
> > > +
> > > +			list_del(&page->lru);
> > > +			unlock_page(page);
> > > +			__free_pages(page, 0);
> > > +		}
> > >  	}
> > > -	unlock_page(page);
> > > -out:
> > > -	__free_pages(page, 0);
> > > -	return 0;
> > > +	return err;
> > >  }
> > >
> > >  static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
> > 
> > --
> > Jaegeuk Kim
> > Samsung
> 

-- 
Jaegeuk Kim
Samsung


^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
  2013-11-27  8:19     ` Jaegeuk Kim
@ 2013-11-28  1:26       ` Chao Yu
  2013-11-28  3:33         ` Jaegeuk Kim
  0 siblings, 1 reply; 8+ messages in thread
From: Chao Yu @ 2013-11-28  1:26 UTC (permalink / raw)
  To: jaegeuk.kim
  Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel,
	'谭姝'

Hi Kim,

> -----Original Message-----
> From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> Sent: Wednesday, November 27, 2013 4:19 PM
> To: Chao Yu
> Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; '谭姝'
> Subject: RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> 
> Hi,
> 
> 2013-11-27 (수), 15:58 +0800, Chao Yu:
> > Hi Kim,
> >
> > > -----Original Message-----
> > > From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> > > Sent: Wednesday, November 27, 2013 1:30 PM
> > > To: Chao Yu
> > > Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; 谭姝
> > > Subject: Re: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> > >
> > > Hi Chao,
> > >
> > > It seems that we already have a readahed function for node pages,
> > > ra_node_page().
> > > So, we don't make a page list for this, but can use the node_inode's
> > > page cache.
> >
> > So you mean it's waste to release page list with updated data after we
> > finish work in restore_node_summary, right?
> 
> Right.

So how about adding all pages of the page list to node_inode's address space by
add_to_page_cache_lru() with arg sum_entry->nid?

> 
> >
> > >
> > > So how about writing ra_node_pages() which use the node_inode's page
> > > cache?
> >
> > Hmm, so ra_node_pages is introduced for read node_inode's pages which are
> > logical contiguously? and it also could take place of ra_node_page?
> 
> Ah. The ra_node_page() read a node page ahead for a given node id.
> So it doesn't match exactly between ra_node_page() and ra_node_pages()
> that I suggested.
> So how about reading node pages and then caching some of them in the
> page cache, node_inode's address space?

Got it.
If we do not use the method above, we would have to search the NAT for the nid
number (as the index of the node_inode's page) by the specified node page
blkaddr, which costs a lot.
What do you think?

> 
> Thanks,
> 
> >
> >
> > >
> > > Thanks,
> > >
> > > 2013-11-22 (금), 15:48 +0800, Chao Yu:
> > > > If cp has no CP_UMOUNT_FLAG, we will read all pages in whole node segment
> > > > one by one, it makes low performance. So let's merge contiguous pages and
> > > > readahead for better performance.
> > > >
> > > > Signed-off-by: Chao Yu <chao2.yu@samsung.com>
> > > > ---
> > > >  fs/f2fs/node.c |   89 +++++++++++++++++++++++++++++++++++++++-----------------
> > > >  1 file changed, 63 insertions(+), 26 deletions(-)
> > > >
> > > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> > > > index 4ac4150..81e704a 100644
> > > > --- a/fs/f2fs/node.c
> > > > +++ b/fs/f2fs/node.c
> > > > @@ -1572,47 +1572,84 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
> > > >  	return 0;
> > > >  }
> > > >
> > > > +/*
> > > > + * ra_sum_pages() merge contiguous pages into one bio and submit.
> > > > + * these pre-readed pages are linked in pages list.
> > > > + */
> > > > +static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > > > +				int start, int nrpages)
> > > > +{
> > > > +	struct page *page;
> > > > +	int page_idx = start;
> > > > +
> > > > +	for (; page_idx < start + nrpages; page_idx++) {
> > > > +		/* alloc temporal page for read node summary info*/
> > > > +		page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > > > +		if (!page) {
> > > > +			struct page *tmp;
> > > > +			list_for_each_entry_safe(page, tmp, pages, lru) {
> > > > +				list_del(&page->lru);
> > > > +				unlock_page(page);
> > > > +				__free_pages(page, 0);
> > > > +			}
> > > > +			return -ENOMEM;
> > > > +		}
> > > > +
> > > > +		lock_page(page);
> > > > +		page->index = page_idx;
> > > > +		list_add_tail(&page->lru, pages);
> > > > +	}
> > > > +
> > > > +	list_for_each_entry(page, pages, lru)
> > > > +		submit_read_page(sbi, page, page->index, READ_SYNC);
> > > > +
> > > > +	f2fs_submit_read_bio(sbi, READ_SYNC);
> > > > +	return 0;
> > > > +}
> > > > +
> > > >  int restore_node_summary(struct f2fs_sb_info *sbi,
> > > >  			unsigned int segno, struct f2fs_summary_block *sum)
> > > >  {
> > > >  	struct f2fs_node *rn;
> > > >  	struct f2fs_summary *sum_entry;
> > > > -	struct page *page;
> > > > +	struct page *page, *tmp;
> > > >  	block_t addr;
> > > > -	int i, last_offset;
> > > > -
> > > > -	/* alloc temporal page for read node */
> > > > -	page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > > > -	if (!page)
> > > > -		return -ENOMEM;
> > > > -	lock_page(page);
> > > > +	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
> > > > +	int i, last_offset, nrpages, err = 0;
> > > > +	LIST_HEAD(page_list);
> > > >
> > > >  	/* scan the node segment */
> > > >  	last_offset = sbi->blocks_per_seg;
> > > >  	addr = START_BLOCK(sbi, segno);
> > > >  	sum_entry = &sum->entries[0];
> > > >
> > > > -	for (i = 0; i < last_offset; i++, sum_entry++) {
> > > > -		/*
> > > > -		 * In order to read next node page,
> > > > -		 * we must clear PageUptodate flag.
> > > > -		 */
> > > > -		ClearPageUptodate(page);
> > > > +	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
> > > >
> > > > -		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
> > > > -			goto out;
> > > > +		nrpages = min(last_offset - i, bio_blocks);
> > > > +		/* read ahead node pages */
> > > > +		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > > > +		if (err)
> > > > +			return err;
> > > >
> > > > -		lock_page(page);
> > > > -		rn = F2FS_NODE(page);
> > > > -		sum_entry->nid = rn->footer.nid;
> > > > -		sum_entry->version = 0;
> > > > -		sum_entry->ofs_in_node = 0;
> > > > -		addr++;
> > > > +		list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > > > +
> > > > +			lock_page(page);
> > > > +			if(PageUptodate(page)) {
> > > > +				rn = F2FS_NODE(page);
> > > > +				sum_entry->nid = rn->footer.nid;
> > > > +				sum_entry->version = 0;
> > > > +				sum_entry->ofs_in_node = 0;
> > > > +				sum_entry++;
> > > > +			} else {
> > > > +				err = -EIO;
> > > > +			}
> > > > +
> > > > +			list_del(&page->lru);
> > > > +			unlock_page(page);
> > > > +			__free_pages(page, 0);
> > > > +		}
> > > >  	}
> > > > -	unlock_page(page);
> > > > -out:
> > > > -	__free_pages(page, 0);
> > > > -	return 0;
> > > > +	return err;
> > > >  }
> > > >
> > > >  static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
> > >
> > > --
> > > Jaegeuk Kim
> > > Samsung
> >
> 
> --
> Jaegeuk Kim
> Samsung


^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
  2013-11-28  1:26       ` Chao Yu
@ 2013-11-28  3:33         ` Jaegeuk Kim
  2013-11-28  5:56           ` Chao Yu
  0 siblings, 1 reply; 8+ messages in thread
From: Jaegeuk Kim @ 2013-11-28  3:33 UTC (permalink / raw)
  To: Chao Yu
  Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel,
	'谭姝'

Hi,

2013-11-28 (목), 09:26 +0800, Chao Yu:
> Hi Kim,
> 
> > -----Original Message-----
> > From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> > Sent: Wednesday, November 27, 2013 4:19 PM
> > To: Chao Yu
> > Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; '谭姝'
> > Subject: RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> > 
> > Hi,
> > 
> > 2013-11-27 (수), 15:58 +0800, Chao Yu:
> > > Hi Kim,
> > >
> > > > -----Original Message-----
> > > > From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> > > > Sent: Wednesday, November 27, 2013 1:30 PM
> > > > To: Chao Yu
> > > > Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; 谭姝
> > > > Subject: Re: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> > > >
> > > > Hi Chao,
> > > >
> > > > It seems that we already have a readahed function for node pages,
> > > > ra_node_page().
> > > > So, we don't make a page list for this, but can use the node_inode's
> > > > page cache.
> > >
> > > So you mean it's waste to release page list with updated data after we
> > > finish work in restore_node_summary, right?
> > 
> > Right.
> 
> So how about add all pages of page list to node_inode's address space by
> add_to_page_cache_lru() with arg sum_entry->nid?

I don't think it's a proper way to use add_to_page_cache_lru() directly.

> 
> > 
> > >
> > > >
> > > > So how about writing ra_node_pages() which use the node_inode's page
> > > > cache?
> > >
> > > Hmm, so ra_node_pages is introduced for read node_inode's pages which are
> > > logical contiguously? and it also could take place of ra_node_page?
> > 
> > Ah. The ra_node_page() read a node page ahead for a given node id.
> > So it doesn't match exactly between ra_node_page() and ra_node_pages()
> > that I suggested.
> > So how about reading node pages and then caching some of them in the
> > page cache, node_inode's address space?
> 
> Got it,
> If we do not use the method above, we should search the NAT for nid number
> as the index of node_inode's page by the specified node page blkaddr, that costs
> a lot.
> How do you think?

1. grab_cache_page(node_footer->nid);
2. memcpy();
3. SetPageUptodate();
4. f2fs_put_page();

Thanks,

> 
> > 
> > Thanks,
> > 
> > >
> > >
> > > >
> > > > Thanks,
> > > >
> > > > 2013-11-22 (금), 15:48 +0800, Chao Yu:
> > > > > If cp has no CP_UMOUNT_FLAG, we will read all pages in whole node segment
> > > > > one by one, it makes low performance. So let's merge contiguous pages and
> > > > > readahead for better performance.
> > > > >
> > > > > Signed-off-by: Chao Yu <chao2.yu@samsung.com>
> > > > > ---
> > > > >  fs/f2fs/node.c |   89 +++++++++++++++++++++++++++++++++++++++-----------------
> > > > >  1 file changed, 63 insertions(+), 26 deletions(-)
> > > > >
> > > > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> > > > > index 4ac4150..81e704a 100644
> > > > > --- a/fs/f2fs/node.c
> > > > > +++ b/fs/f2fs/node.c
> > > > > @@ -1572,47 +1572,84 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
> > > > >  	return 0;
> > > > >  }
> > > > >
> > > > > +/*
> > > > > + * ra_sum_pages() merge contiguous pages into one bio and submit.
> > > > > + * these pre-readed pages are linked in pages list.
> > > > > + */
> > > > > +static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > > > > +				int start, int nrpages)
> > > > > +{
> > > > > +	struct page *page;
> > > > > +	int page_idx = start;
> > > > > +
> > > > > +	for (; page_idx < start + nrpages; page_idx++) {
> > > > > +		/* alloc temporal page for read node summary info*/
> > > > > +		page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > > > > +		if (!page) {
> > > > > +			struct page *tmp;
> > > > > +			list_for_each_entry_safe(page, tmp, pages, lru) {
> > > > > +				list_del(&page->lru);
> > > > > +				unlock_page(page);
> > > > > +				__free_pages(page, 0);
> > > > > +			}
> > > > > +			return -ENOMEM;
> > > > > +		}
> > > > > +
> > > > > +		lock_page(page);
> > > > > +		page->index = page_idx;
> > > > > +		list_add_tail(&page->lru, pages);
> > > > > +	}
> > > > > +
> > > > > +	list_for_each_entry(page, pages, lru)
> > > > > +		submit_read_page(sbi, page, page->index, READ_SYNC);
> > > > > +
> > > > > +	f2fs_submit_read_bio(sbi, READ_SYNC);
> > > > > +	return 0;
> > > > > +}
> > > > > +
> > > > >  int restore_node_summary(struct f2fs_sb_info *sbi,
> > > > >  			unsigned int segno, struct f2fs_summary_block *sum)
> > > > >  {
> > > > >  	struct f2fs_node *rn;
> > > > >  	struct f2fs_summary *sum_entry;
> > > > > -	struct page *page;
> > > > > +	struct page *page, *tmp;
> > > > >  	block_t addr;
> > > > > -	int i, last_offset;
> > > > > -
> > > > > -	/* alloc temporal page for read node */
> > > > > -	page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > > > > -	if (!page)
> > > > > -		return -ENOMEM;
> > > > > -	lock_page(page);
> > > > > +	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
> > > > > +	int i, last_offset, nrpages, err = 0;
> > > > > +	LIST_HEAD(page_list);
> > > > >
> > > > >  	/* scan the node segment */
> > > > >  	last_offset = sbi->blocks_per_seg;
> > > > >  	addr = START_BLOCK(sbi, segno);
> > > > >  	sum_entry = &sum->entries[0];
> > > > >
> > > > > -	for (i = 0; i < last_offset; i++, sum_entry++) {
> > > > > -		/*
> > > > > -		 * In order to read next node page,
> > > > > -		 * we must clear PageUptodate flag.
> > > > > -		 */
> > > > > -		ClearPageUptodate(page);
> > > > > +	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
> > > > >
> > > > > -		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
> > > > > -			goto out;
> > > > > +		nrpages = min(last_offset - i, bio_blocks);
> > > > > +		/* read ahead node pages */
> > > > > +		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > > > > +		if (err)
> > > > > +			return err;
> > > > >
> > > > > -		lock_page(page);
> > > > > -		rn = F2FS_NODE(page);
> > > > > -		sum_entry->nid = rn->footer.nid;
> > > > > -		sum_entry->version = 0;
> > > > > -		sum_entry->ofs_in_node = 0;
> > > > > -		addr++;
> > > > > +		list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > > > > +
> > > > > +			lock_page(page);
> > > > > +			if(PageUptodate(page)) {
> > > > > +				rn = F2FS_NODE(page);
> > > > > +				sum_entry->nid = rn->footer.nid;
> > > > > +				sum_entry->version = 0;
> > > > > +				sum_entry->ofs_in_node = 0;
> > > > > +				sum_entry++;
> > > > > +			} else {
> > > > > +				err = -EIO;
> > > > > +			}
> > > > > +
> > > > > +			list_del(&page->lru);
> > > > > +			unlock_page(page);
> > > > > +			__free_pages(page, 0);
> > > > > +		}
> > > > >  	}
> > > > > -	unlock_page(page);
> > > > > -out:
> > > > > -	__free_pages(page, 0);
> > > > > -	return 0;
> > > > > +	return err;
> > > > >  }
> > > > >
> > > > >  static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
> > > >
> > > > --
> > > > Jaegeuk Kim
> > > > Samsung
> > >
> > 
> > --
> > Jaegeuk Kim
> > Samsung
> 

-- 
Jaegeuk Kim
Samsung


^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
  2013-11-28  3:33         ` Jaegeuk Kim
@ 2013-11-28  5:56           ` Chao Yu
  2013-11-30  5:12             ` Jaegeuk Kim
  0 siblings, 1 reply; 8+ messages in thread
From: Chao Yu @ 2013-11-28  5:56 UTC (permalink / raw)
  To: jaegeuk.kim
  Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel,
	'谭姝'

Hi,

> -----Original Message-----
> From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> Sent: Thursday, November 28, 2013 11:33 AM
> To: Chao Yu
> Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; '谭姝'
> Subject: RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> 
> Hi,
> 
> 2013-11-28 (목), 09:26 +0800, Chao Yu:
> > Hi Kim,
> >
> > > -----Original Message-----
> > > From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> > > Sent: Wednesday, November 27, 2013 4:19 PM
> > > To: Chao Yu
> > > Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; '谭姝'
> > > Subject: RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> > >
> > > Hi,
> > >
> > > 2013-11-27 (수), 15:58 +0800, Chao Yu:
> > > > Hi Kim,
> > > >
> > > > > -----Original Message-----
> > > > > From: Jaegeuk Kim [mailto:jaegeuk.kim@samsung.com]
> > > > > Sent: Wednesday, November 27, 2013 1:30 PM
> > > > > To: Chao Yu
> > > > > Cc: linux-fsdevel@vger.kernel.org; linux-kernel@vger.kernel.org; linux-f2fs-devel@lists.sourceforge.net; 谭姝
> > > > > Subject: Re: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
> > > > >
> > > > > Hi Chao,
> > > > >
> > > > > It seems that we already have a readahed function for node pages,
> > > > > ra_node_page().
> > > > > So, we don't make a page list for this, but can use the node_inode's
> > > > > page cache.
> > > >
> > > > So you mean it's waste to release page list with updated data after we
> > > > finish work in restore_node_summary, right?
> > >
> > > Right.
> >
> > So how about add all pages of page list to node_inode's address space by
> > add_to_page_cache_lru() with arg sum_entry->nid?
> 
> I don't think it's proper way to use add_to_page_cache_lru() directly.

This is the way it is done in VM readahead (i.e. read_pages/mpage_readpages/
read_cache_pages).
So is your concern that calling add_to_page_cache_lru() on its own,
outside those helpers, may cause problems?

> 
> >
> > >
> > > >
> > > > >
> > > > > So how about writing ra_node_pages() which use the node_inode's page
> > > > > cache?
> > > >
> > > > Hmm, so ra_node_pages is introduced for read node_inode's pages which are
> > > > logical contiguously? and it also could take place of ra_node_page?
> > >
> > > Ah. The ra_node_page() read a node page ahead for a given node id.
> > > So it doesn't match exactly between ra_node_page() and ra_node_pages()
> > > that I suggested.
> > > So how about reading node pages and then caching some of them in the
> > > page cache, node_inode's address space?
> >
> > Got it,
> > If we do not use the method above, we should search the NAT for nid number
> > as the index of node_inode's page by the specified node page blkaddr, that costs
> > a lot.
> > How do you think?
> 
> 1. grab_cache_page(node_footer->nid);
> 2. memcpy();
> 3. SetPageUptodate();
> 4. f2fs_put_page();

It could be.

This makes ra_node_pages() synchronous, because we would need to read
node_footer->nid from the updated node page before we can cache the node pages,
and we would still be using the page list to pass the updated pages around.

Why not introduce f2fs_cache_node_pages(), containing your code above, to cache
the node pages after ra_node_pages()?

Thanks,
Yu

> 
> Thanks,
> 
> >
> > >
> > > Thanks,
> > >
> > > >
> > > >
> > > > >
> > > > > Thanks,
> > > > >
> > > > > 2013-11-22 (금), 15:48 +0800, Chao Yu:
> > > > > > If cp has no CP_UMOUNT_FLAG, we will read all pages in whole node segment
> > > > > > one by one, it makes low performance. So let's merge contiguous pages and
> > > > > > readahead for better performance.
> > > > > >
> > > > > > Signed-off-by: Chao Yu <chao2.yu@samsung.com>
> > > > > > ---
> > > > > >  fs/f2fs/node.c |   89 +++++++++++++++++++++++++++++++++++++++-----------------
> > > > > >  1 file changed, 63 insertions(+), 26 deletions(-)
> > > > > >
> > > > > > diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
> > > > > > index 4ac4150..81e704a 100644
> > > > > > --- a/fs/f2fs/node.c
> > > > > > +++ b/fs/f2fs/node.c
> > > > > > @@ -1572,47 +1572,84 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
> > > > > >  	return 0;
> > > > > >  }
> > > > > >
> > > > > > +/*
> > > > > > + * ra_sum_pages() merge contiguous pages into one bio and submit.
> > > > > > + * these pre-readed pages are linked in pages list.
> > > > > > + */
> > > > > > +static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
> > > > > > +				int start, int nrpages)
> > > > > > +{
> > > > > > +	struct page *page;
> > > > > > +	int page_idx = start;
> > > > > > +
> > > > > > +	for (; page_idx < start + nrpages; page_idx++) {
> > > > > > +		/* alloc temporal page for read node summary info*/
> > > > > > +		page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > > > > > +		if (!page) {
> > > > > > +			struct page *tmp;
> > > > > > +			list_for_each_entry_safe(page, tmp, pages, lru) {
> > > > > > +				list_del(&page->lru);
> > > > > > +				unlock_page(page);
> > > > > > +				__free_pages(page, 0);
> > > > > > +			}
> > > > > > +			return -ENOMEM;
> > > > > > +		}
> > > > > > +
> > > > > > +		lock_page(page);
> > > > > > +		page->index = page_idx;
> > > > > > +		list_add_tail(&page->lru, pages);
> > > > > > +	}
> > > > > > +
> > > > > > +	list_for_each_entry(page, pages, lru)
> > > > > > +		submit_read_page(sbi, page, page->index, READ_SYNC);
> > > > > > +
> > > > > > +	f2fs_submit_read_bio(sbi, READ_SYNC);
> > > > > > +	return 0;
> > > > > > +}
> > > > > > +
> > > > > >  int restore_node_summary(struct f2fs_sb_info *sbi,
> > > > > >  			unsigned int segno, struct f2fs_summary_block *sum)
> > > > > >  {
> > > > > >  	struct f2fs_node *rn;
> > > > > >  	struct f2fs_summary *sum_entry;
> > > > > > -	struct page *page;
> > > > > > +	struct page *page, *tmp;
> > > > > >  	block_t addr;
> > > > > > -	int i, last_offset;
> > > > > > -
> > > > > > -	/* alloc temporal page for read node */
> > > > > > -	page = alloc_page(GFP_NOFS | __GFP_ZERO);
> > > > > > -	if (!page)
> > > > > > -		return -ENOMEM;
> > > > > > -	lock_page(page);
> > > > > > +	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
> > > > > > +	int i, last_offset, nrpages, err = 0;
> > > > > > +	LIST_HEAD(page_list);
> > > > > >
> > > > > >  	/* scan the node segment */
> > > > > >  	last_offset = sbi->blocks_per_seg;
> > > > > >  	addr = START_BLOCK(sbi, segno);
> > > > > >  	sum_entry = &sum->entries[0];
> > > > > >
> > > > > > -	for (i = 0; i < last_offset; i++, sum_entry++) {
> > > > > > -		/*
> > > > > > -		 * In order to read next node page,
> > > > > > -		 * we must clear PageUptodate flag.
> > > > > > -		 */
> > > > > > -		ClearPageUptodate(page);
> > > > > > +	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
> > > > > >
> > > > > > -		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
> > > > > > -			goto out;
> > > > > > +		nrpages = min(last_offset - i, bio_blocks);
> > > > > > +		/* read ahead node pages */
> > > > > > +		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
> > > > > > +		if (err)
> > > > > > +			return err;
> > > > > >
> > > > > > -		lock_page(page);
> > > > > > -		rn = F2FS_NODE(page);
> > > > > > -		sum_entry->nid = rn->footer.nid;
> > > > > > -		sum_entry->version = 0;
> > > > > > -		sum_entry->ofs_in_node = 0;
> > > > > > -		addr++;
> > > > > > +		list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > > > > > +
> > > > > > +			lock_page(page);
> > > > > > +			if(PageUptodate(page)) {
> > > > > > +				rn = F2FS_NODE(page);
> > > > > > +				sum_entry->nid = rn->footer.nid;
> > > > > > +				sum_entry->version = 0;
> > > > > > +				sum_entry->ofs_in_node = 0;
> > > > > > +				sum_entry++;
> > > > > > +			} else {
> > > > > > +				err = -EIO;
> > > > > > +			}
> > > > > > +
> > > > > > +			list_del(&page->lru);
> > > > > > +			unlock_page(page);
> > > > > > +			__free_pages(page, 0);
> > > > > > +		}
> > > > > >  	}
> > > > > > -	unlock_page(page);
> > > > > > -out:
> > > > > > -	__free_pages(page, 0);
> > > > > > -	return 0;
> > > > > > +	return err;
> > > > > >  }
> > > > > >
> > > > > >  static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
> > > > >
> > > > > --
> > > > > Jaegeuk Kim
> > > > > Samsung
> > > >
> > >
> > > --
> > > Jaegeuk Kim
> > > Samsung
> >
> 
> --
> Jaegeuk Kim
> Samsung


^ permalink raw reply	[flat|nested] 8+ messages in thread

* RE: [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary
  2013-11-28  5:56           ` Chao Yu
@ 2013-11-30  5:12             ` Jaegeuk Kim
  0 siblings, 0 replies; 8+ messages in thread
From: Jaegeuk Kim @ 2013-11-30  5:12 UTC (permalink / raw)
  To: Chao Yu
  Cc: linux-fsdevel, linux-kernel, linux-f2fs-devel,
	'谭姝'

Hi,

[snip]
> > > So how about add all pages of page list to node_inode's address space by
> > > add_to_page_cache_lru() with arg sum_entry->nid?
> > 
> > I don't think it's proper way to use add_to_page_cache_lru() directly.
> 
> This is the way used in VM readahead(i.e. read_pages/mpage_readpages/
> read_cache_pages).
> So what you worry about is that using lonely add_to_page_cache_lru()
> may cause exception, is it?

Right, what I meant was that, IMO, we should avoid copying and pasting MM
code, and instead use its wrappers and exported symbols as much as possible.

> > > > > >
> > > > > > So how about writing ra_node_pages() which use the node_inode's page
> > > > > > cache?
> > > > >
> > > > > Hmm, so ra_node_pages is introduced for read node_inode's pages which are
> > > > > logical contiguously? and it also could take place of ra_node_page?
> > > >
> > > > Ah. The ra_node_page() read a node page ahead for a given node id.
> > > > So it doesn't match exactly between ra_node_page() and ra_node_pages()
> > > > that I suggested.
> > > > So how about reading node pages and then caching some of them in the
> > > > page cache, node_inode's address space?
> > >
> > > Got it,
> > > If we do not use the method above, we should search the NAT for nid number
> > > as the index of node_inode's page by the specified node page blkaddr, that costs
> > > a lot.
> > > How do you think?
> > 
> > 1. grab_cache_page(node_footer->nid);
> > 2. memcpy();
> > 3. SetPageUptodate();
> > 4. f2fs_put_page();
> 
> It could be.
> 
> This make ra_node_pages() synchronized, because we should read node_footer->nid
> from updated node page before we cache node pages, and we will still use page list to
> pass the updated page.
> 
> Why not introduce f2fs_cache_node_pages() include your code to cache node pages after
> ra_node_pages()?

Ok, right.
I'll test again and then merge this patch. :)

-- 
Jaegeuk Kim
Samsung


^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2013-11-30  5:13 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-11-22  7:48 [f2fs-dev] [PATCH] f2fs: readahead contiguous pages for restore_node_summary Chao Yu
2013-11-27  5:29 ` Jaegeuk Kim
2013-11-27  7:58   ` Chao Yu
2013-11-27  8:19     ` Jaegeuk Kim
2013-11-28  1:26       ` Chao Yu
2013-11-28  3:33         ` Jaegeuk Kim
2013-11-28  5:56           ` Chao Yu
2013-11-30  5:12             ` Jaegeuk Kim

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).