* [PATCH 07/12] mm, thp, tmpfs: handle huge page in shmem_undo_range for truncate
From: Ning Qu @ 2013-10-15  0:13 UTC
  To: Andrea Arcangeli, Andrew Morton, Kirill A. Shutemov, Hugh Dickins
  Cc: Al Viro, Hugh Dickins, Wu Fengguang, Jan Kara, Mel Gorman,
	linux-mm, Andi Kleen, Matthew Wilcox, Hillf Danton, Dave Hansen,
	Alexander Shishkin, linux-fsdevel, linux-kernel, Ning Qu

When it comes to truncating a file, add support for handling huge pages in
the truncate range.

Signed-off-by: Ning Qu <quning@gmail.com>
---
 mm/shmem.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 86 insertions(+), 11 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 0a423a9..90f2e0e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -559,6 +559,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
+	/* Whether we have to do partial truncate */
 	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
 	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
 	struct pagevec pvec;
@@ -570,12 +571,16 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	if (lend == -1)
 		end = -1;	/* unsigned, so actually very big */
 
+	i_split_down_read(inode);
 	pagevec_init(&pvec, 0);
 	index = start;
 	while (index < end) {
+		bool thp = false;
+
 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
 							pvec.pages, indices);
+
 		if (!pvec.nr)
 			break;
 		mem_cgroup_uncharge_start();
@@ -586,6 +591,25 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			if (index >= end)
 				break;
 
+			thp = PageTransHugeCache(page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
+			if (thp) {
+				/* the range starts in the middle of a huge page */
+				if (index < start) {
+					partial_start = true;
+					start = index + HPAGE_CACHE_NR;
+					break;
+				}
+
+				/* the range ends on a huge page */
+				if (index == (end & ~HPAGE_CACHE_INDEX_MASK)) {
+					partial_end = true;
+					end = index;
+					break;
+				}
+			}
+#endif
+
 			if (radix_tree_exceptional_entry(page)) {
 				if (unfalloc)
 					continue;
@@ -603,26 +627,52 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				}
 			}
 			unlock_page(page);
+			if (thp)
+				break;
 		}
 		shmem_deswap_pagevec(&pvec);
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
 		cond_resched();
-		index++;
+		if (thp)
+			index += HPAGE_CACHE_NR;
+		else
+			index++;
 	}
 
 	if (partial_start) {
 		struct page *page = NULL;
 		gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
+		int flags = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
+		flags |= AOP_FLAG_TRANSHUGE;
+#endif
 
-		shmem_getpage(inode, start - 1, &page, SGP_READ, gfp, 0, NULL);
+		shmem_getpage(inode, start - 1, &page, SGP_READ, gfp,
+				flags, NULL);
 		if (page) {
-			unsigned int top = PAGE_CACHE_SIZE;
-			if (start > end) {
-				top = partial_end;
-				partial_end = 0;
+			pgoff_t index_mask;
+			loff_t page_cache_mask;
+			unsigned pstart, pend;
+
+			index_mask = 0UL;
+			page_cache_mask = PAGE_CACHE_MASK;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
+			if (PageTransHugeCache(page)) {
+				index_mask = HPAGE_CACHE_INDEX_MASK;
+				page_cache_mask = HPAGE_PMD_MASK;
 			}
-			zero_user_segment(page, partial_start, top);
+#endif
+
+			pstart = lstart & ~page_cache_mask;
+			if ((end & ~index_mask) == page->index) {
+				pend = (lend + 1) & ~page_cache_mask;
+				end = page->index;
+				partial_end = false; /* handled here */
+			} else
+				pend = PAGE_CACHE_SIZE << compound_order(page);
+
+			zero_pagecache_segment(page, pstart, pend);
 			set_page_dirty(page);
 			unlock_page(page);
 			page_cache_release(page);
@@ -631,20 +681,37 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	if (partial_end) {
 		struct page *page = NULL;
 		gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
+		int flags = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
+		flags |= AOP_FLAG_TRANSHUGE;
+#endif
 
-		shmem_getpage(inode, end, &page, SGP_READ, gfp, 0, NULL);
+		shmem_getpage(inode, end, &page, SGP_READ, gfp,
+				flags, NULL);
 		if (page) {
-			zero_user_segment(page, 0, partial_end);
+			loff_t page_cache_mask;
+			unsigned pend;
+
+			page_cache_mask = PAGE_CACHE_MASK;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
+			if (PageTransHugeCache(page))
+				page_cache_mask = HPAGE_PMD_MASK;
+#endif
+			pend = (lend + 1) & ~page_cache_mask;
+			end = page->index;
+			zero_pagecache_segment(page, 0, pend);
 			set_page_dirty(page);
 			unlock_page(page);
 			page_cache_release(page);
 		}
 	}
 	if (start >= end)
-		return;
+		goto out;
 
 	index = start;
 	for ( ; ; ) {
+		bool thp = false;
+
 		cond_resched();
 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
 				min(end - index, (pgoff_t)PAGEVEC_SIZE),
@@ -676,6 +743,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				continue;
 			}
 
+			thp = PageTransHugeCache(page);
 			lock_page(page);
 			if (!unfalloc || !PageUptodate(page)) {
 				if (page->mapping == mapping) {
@@ -684,17 +752,24 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				}
 			}
 			unlock_page(page);
+			if (thp)
+				break;
 		}
 		shmem_deswap_pagevec(&pvec);
 		pagevec_release(&pvec);
 		mem_cgroup_uncharge_end();
-		index++;
+		if (thp)
+			index += HPAGE_CACHE_NR;
+		else
+			index++;
 	}
 
 	spin_lock(&info->lock);
 	info->swapped -= nr_swaps_freed;
 	shmem_recalc_inode(inode);
 	spin_unlock(&info->lock);
+out:
+	i_split_up_read(inode);
 }
 
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
-- 
1.8.4
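
For readers following the mask arithmetic in the partial_start/partial_end
handling above, here is a minimal user-space sketch of the same computation,
assuming 4KiB base pages and 2MiB huge pages (the constants and names are
illustrative, not the kernel's):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#define HPAGE_SHIFT	21
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

int main(void)
{
	unsigned long lstart = 0x201234;	/* first byte to truncate */
	unsigned long lend = 0x5ff7ff;		/* last byte to truncate */

	/*
	 * Offset of lstart within its containing page: the page keeps
	 * its head and is zeroed from here to its end.
	 */
	unsigned long pstart = lstart & ~PAGE_MASK;
	unsigned long pstart_huge = lstart & ~HPAGE_MASK;

	/*
	 * One past the last truncated byte within the final page: the
	 * page is zeroed from its start up to (but excluding) this.
	 */
	unsigned long pend = (lend + 1) & ~PAGE_MASK;
	unsigned long pend_huge = (lend + 1) & ~HPAGE_MASK;

	printf("4K pages: zero [%#lx..page end) and [0..%#lx)\n",
	       pstart, pend);
	printf("2M pages: zero [%#lx..page end) and [0..%#lx)\n",
	       pstart_huge, pend_huge);
	return 0;
}

This mirrors pstart = lstart & ~page_cache_mask and
pend = (lend + 1) & ~page_cache_mask in the patch, with page_cache_mask
switching between PAGE_CACHE_MASK and HPAGE_PMD_MASK depending on whether
the boundary page is huge.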




* RE: [PATCH 07/12] mm, thp, tmpfs: handle huge page in shmem_undo_range for truncate
From: Kirill A. Shutemov @ 2013-10-15 11:01 UTC
  To: Ning Qu
  Cc: Andrea Arcangeli, Andrew Morton, Kirill A. Shutemov,
	Hugh Dickins, Al Viro, Hugh Dickins, Wu Fengguang, Jan Kara,
	Mel Gorman, linux-mm, Andi Kleen, Matthew Wilcox, Hillf Danton,
	Dave Hansen, Alexander Shishkin, linux-fsdevel, linux-kernel,
	Ning Qu

Ning Qu wrote:
> [...]
> @@ -586,6 +591,25 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
>  			if (index >= end)
>  				break;
>  
> +			thp = PageTransHugeCache(page);
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE

Again, here and below the ifdef is redundant: PageTransHugeCache() is zero
at compile time, so the thp case will be optimized out.

And do we really need a copy of truncate logic here? Is there a way to
share code?
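
Concretely, with a compile-time-zero predicate the hunk could drop the
guard entirely. A sketch reusing the patch's names (surrounding loop
elided):

		thp = PageTransHugeCache(page);
		if (thp) {
			/* the range starts in the middle of a huge page */
			if (index < start) {
				partial_start = true;
				start = index + HPAGE_CACHE_NR;
				break;
			}

			/* the range ends on a huge page */
			if (index == (end & ~HPAGE_CACHE_INDEX_MASK)) {
				partial_end = true;
				end = index;
				break;
			}
		}

When CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE is off and PageTransHugeCache()
is a constant zero, the compiler discards the whole branch as dead code,
so the HPAGE_* references never reach the object file.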

-- 
 Kirill A. Shutemov


* Re: [PATCH 07/12] mm, thp, tmpfs: handle huge page in shmem_undo_range for truncate
From: Ning Qu @ 2013-10-15 18:48 UTC
  To: Kirill A. Shutemov
  Cc: Andrea Arcangeli, Andrew Morton, Hugh Dickins, Al Viro,
	Wu Fengguang, Jan Kara, Mel Gorman, linux-mm, Andi Kleen,
	Matthew Wilcox, Hillf Danton, Dave Hansen, Alexander Shishkin,
	linux-fsdevel, linux-kernel

Best wishes,
-- 
Ning Qu (曲宁) | Software Engineer | quning@google.com | +1-408-418-6066


On Tue, Oct 15, 2013 at 4:01 AM, Kirill A. Shutemov
<kirill.shutemov@linux.intel.com> wrote:
> Ning Qu wrote:
>> [...]
>> @@ -586,6 +591,25 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
>>                       if (index >= end)
>>                               break;
>>
>> +                     thp = PageTransHugeCache(page);
>> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
>
> Again, here and below the ifdef is redundant: PageTransHugeCache() is zero
> at compile time, so the thp case will be optimized out.

The problem actually comes from HPAGE_CACHE_INDEX_MASK, which is defined
as a build bug when CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE is false. So we
either wrap some of the logic inside an inline function, or we have to do
it like this. Or should we not treat HPAGE_CACHE_INDEX_MASK as a build bug?
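
(For reference, the definitions under discussion look roughly like this
when the option is off; a sketch modeled on the existing HPAGE_PMD_* stubs
in huge_mm.h, not the exact code from the patchset:)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
#define HPAGE_CACHE_NR		(1UL << (HPAGE_PMD_SHIFT - PAGE_CACHE_SHIFT))
#define HPAGE_CACHE_INDEX_MASK	(HPAGE_CACHE_NR - 1)
#else
/* Any use that survives dead-code elimination becomes a build error. */
#define HPAGE_CACHE_NR		({ BUILD_BUG(); 0UL; })
#define HPAGE_CACHE_INDEX_MASK	({ BUILD_BUG(); 0UL; })
#endif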

>
> And do we really need a copy of truncate logic here? Is there a way to
> share code?
>
The truncate logic in tmpfs and the generic one are similar but not
exactly the same (no readahead), so sharing the whole function might not
be a good choice from the perspective of tmpfs? Anyway, there are other
similar functions in tmpfs, e.g. the one you mentioned for
shmem_add_to_page_cache. It is possible to share the code; I am just
worried it will make the logic more complicated.

Maybe Hugh is in a better position to judge this? Thanks!



* Re: [PATCH 07/12] mm, thp, tmpfs: handle huge page in shmem_undo_range for truncate
From: Kirill A. Shutemov @ 2013-10-16 12:09 UTC
  To: Ning Qu
  Cc: Kirill A. Shutemov, Andrea Arcangeli, Andrew Morton,
	Hugh Dickins, Al Viro, Wu Fengguang, Jan Kara, Mel Gorman,
	linux-mm, Andi Kleen, Matthew Wilcox, Hillf Danton, Dave Hansen,
	Alexander Shishkin, linux-fsdevel, linux-kernel

Ning Qu wrote:
> > Again, here and below the ifdef is redundant: PageTransHugeCache() is zero
> > at compile time, so the thp case will be optimized out.
> 
> The problem actually comes from HPAGE_CACHE_INDEX_MASK, which is defined
> as a build bug when CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE is false. So we
> either wrap some of the logic inside an inline function, or we have to do
> it like this. Or should we not treat HPAGE_CACHE_INDEX_MASK as a build bug?

HPAGE_CACHE_INDEX_MASK shouldn't be a problem.
If it's wrapped inside 'if (PageTransHugeCache(page))' or similar, it will
be eliminated by the compiler when thp-pc is disabled, and the build bug
will not be triggered.
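
A sketch of why that works, assuming the usual compile-time-constant stub
for the disabled case:

/* With thp-pc compiled out, the predicate is a constant zero: */
static inline int PageTransHugeCache(struct page *page)
{
	return 0;
}

	/*
	 * A guarded use is then dead code: the compiler drops the branch,
	 * so the undefined-function call that BUILD_BUG() (hidden behind
	 * HPAGE_CACHE_INDEX_MASK) expands to is never emitted, and the
	 * build succeeds without any #ifdef in the caller.
	 */
	if (PageTransHugeCache(page))
		index = (index & ~HPAGE_CACHE_INDEX_MASK) + HPAGE_CACHE_NR;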

> 
> >
> > And do we really need a copy of truncate logic here? Is there a way to
> > share code?
> >
> The truncate logic in tmpfs and the generic one are similar but not
> exactly the same (no readahead), so sharing the whole function might not
> be a good choice from the perspective of tmpfs? Anyway, there are other
> similar functions in tmpfs, e.g. the one you mentioned for
> shmem_add_to_page_cache. It is possible to share the code; I am just
> worried it will make the logic more complicated.

I think introducing thp-pc is a good opportunity to refactor all of this code.

-- 
 Kirill A. Shutemov


* Re: [PATCH 07/12] mm, thp, tmpfs: handle huge page in shmem_undo_range for truncate
From: Ning Qu @ 2013-10-16 18:48 UTC
  To: Kirill A. Shutemov
  Cc: Andrea Arcangeli, Andrew Morton, Hugh Dickins, Al Viro,
	Wu Fengguang, Jan Kara, Mel Gorman, linux-mm, Andi Kleen,
	Matthew Wilcox, Hillf Danton, Dave Hansen, Alexander Shishkin,
	linux-fsdevel, linux-kernel

Best wishes,
-- 
Ning Qu (曲宁) | Software Engineer | quning@google.com | +1-408-418-6066


On Wed, Oct 16, 2013 at 5:09 AM, Kirill A. Shutemov
<kirill.shutemov@linux.intel.com> wrote:
> Ning Qu wrote:
>> > Again, here and below the ifdef is redundant: PageTransHugeCache() is zero
>> > at compile time, so the thp case will be optimized out.
>>
>> The problem actually comes from HPAGE_CACHE_INDEX_MASK, which is defined
>> as a build bug when CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE is false. So we
>> either wrap some of the logic inside an inline function, or we have to do
>> it like this. Or should we not treat HPAGE_CACHE_INDEX_MASK as a build bug?
>
> HPAGE_CACHE_INDEX_MASK shouldn't be a problem.
> If it's wrapped inside 'if (PageTransHugeCache(page))' or similar, it will
> be eliminated by the compiler when thp-pc is disabled, and the build bug
> will not be triggered.
>
Yes, you are totally right about this. I have removed all the ifdefs for
CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE now. Thanks!



* Re: [PATCH 07/12] mm, thp, tmpfs: handle huge page in shmem_undo_range for truncate
From: Ning Qu @ 2013-10-17 20:58 UTC
  To: Kirill A. Shutemov
  Cc: Andrea Arcangeli, Andrew Morton, Hugh Dickins, Al Viro,
	Wu Fengguang, Jan Kara, Mel Gorman, linux-mm, Andi Kleen,
	Matthew Wilcox, Hillf Danton, Dave Hansen, Alexander Shishkin,
	linux-fsdevel, linux-kernel

Best wishes,
-- 
Ning Qu (曲宁) | Software Engineer | quning@google.com | +1-408-418-6066


On Wed, Oct 16, 2013 at 5:09 AM, Kirill A. Shutemov
<kirill.shutemov@linux.intel.com> wrote:
> Ning Qu wrote:
>> [...]
>> >
>> > And do we really need a copy of truncate logic here? Is there a way to
>> > share code?
>> >
>> The truncate logic in tmpfs and the generic one are similar but not
>> exactly the same (no readahead), so sharing the whole function might not
>> be a good choice from the perspective of tmpfs? Anyway, there are other
>> similar functions in tmpfs, e.g. the one you mentioned for
>> shmem_add_to_page_cache. It is possible to share the code; I am just
>> worried it will make the logic more complicated.
>
> I think introducing thp-pc is a good opportunity to refactor all of this code.

I agree; I reviewed the code of the generic truncate and shmem_undo_range
again. There are just too many differences in almost every major piece of
logic, so it's really not possible to extract any meaningful common
function to share between them.

And I agree, we will try to refactor any other functions where possible.
Thanks!


