* [PATCH] mm: Introduce page_size()
@ 2018-12-31 13:42 Matthew Wilcox
  2018-12-31 23:02 ` Kirill A. Shutemov
  2019-01-01  3:27   ` Aneesh Kumar K.V
  0 siblings, 2 replies; 24+ messages in thread
From: Matthew Wilcox @ 2018-12-31 13:42 UTC (permalink / raw)
  To: Andrew Morton, linux-mm; +Cc: Matthew Wilcox

It's unnecessarily hard to find out the size of a potentially huge page.
Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 arch/arm/mm/flush.c                           | 3 +--
 arch/arm64/mm/flush.c                         | 3 +--
 arch/ia64/mm/init.c                           | 2 +-
 drivers/crypto/chelsio/chtls/chtls_io.c       | 5 ++---
 drivers/staging/android/ion/ion_system_heap.c | 4 ++--
 drivers/target/tcm_fc/tfc_io.c                | 3 +--
 include/linux/hugetlb.h                       | 2 +-
 include/linux/mm.h                            | 6 ++++++
 lib/iov_iter.c                                | 2 +-
 mm/kasan/kasan.c                              | 8 +++-----
 mm/nommu.c                                    | 2 +-
 mm/page_vma_mapped.c                          | 3 +--
 mm/rmap.c                                     | 4 ++--
 mm/slob.c                                     | 2 +-
 mm/slub.c                                     | 4 ++--
 net/xdp/xsk.c                                 | 2 +-
 16 files changed, 27 insertions(+), 28 deletions(-)

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 58469623b0158..c68a120de28b4 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -207,8 +207,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 30695a8681074..9822bd6955429 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -63,8 +63,7 @@ void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d5e12ff1d73cf..e31c578e9c96d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -62,7 +62,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 18f553fcc1673..97bf5ba3a5439 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1082,7 +1082,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 			bool merge;
 
 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1109,8 +1109,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 							   __GFP_NORETRY,
 							   order);
 					if (page)
-						pg_size <<=
-							compound_order(page);
+						pg_size <<= order;
 				}
 				if (!page) {
 					page = alloc_page(gfp);
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 548bb02c0ca6b..3ac7488d893b9 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1eb1f58e00e49..83c1ec65dbccc 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -148,8 +148,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 087fd5f48c912..6140dc031b8c9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -466,7 +466,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5411de93a363e..e920ef9927539 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -712,6 +712,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }
 
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+	return (unsigned long)PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 54c248526b55f..8910a368c3e1b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -857,7 +857,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 	struct page *head = compound_head(page);
 	size_t v = n + offset + page_address(page) - page_address(head);
 
-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= (page_size(head))))
 		return true;
 	WARN_ON(1);
 	return false;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index c3bd5209da380..9d2c9b11b49e9 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -396,8 +396,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache)
 
 void kasan_poison_slab(struct page *page)
 {
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
+	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
 }
 
@@ -569,7 +568,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -602,8 +601,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
diff --git a/mm/nommu.c b/mm/nommu.c
index 749276beb1094..1603132273db8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -107,7 +107,7 @@ unsigned int kobjsize(const void *objp)
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
 
 static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 11df03e71288c..eff4b4520c8d5 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 85b7f94233526..b177925c08401 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -896,7 +896,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 * We have to assume the worse case ie pmd for invalidation. Note that
 	 * the page can not be free from this function.
 	 */
-	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	end = min(vma->vm_end, start + page_size(page));
 	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1369,7 +1369,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * Note that the page can not be free in this function as call of
 	 * try_to_unmap() must hold a reference on the page.
 	 */
-	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+	end = min(vma->vm_end, start + page_size(page));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
diff --git a/mm/slob.c b/mm/slob.c
index 307c2c9feb441..d7d3429e07e1a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -516,7 +516,7 @@ size_t ksize(const void *block)
 
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);
diff --git a/mm/slub.c b/mm/slub.c
index e3629cd7aff16..274fab6581e7c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -830,7 +830,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -3905,7 +3905,7 @@ static size_t __ksize(const void *object)
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a03268454a276..902cd2e7b0189 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -679,7 +679,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 		return -EINVAL;
 
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;
 
 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
-- 
2.19.2


* Re: [PATCH] mm: Introduce page_size()
  2018-12-31 13:42 [PATCH] mm: Introduce page_size() Matthew Wilcox
@ 2018-12-31 23:02 ` Kirill A. Shutemov
  2019-01-01  6:39   ` Matthew Wilcox
  2019-01-01  3:27   ` Aneesh Kumar K.V
  1 sibling, 1 reply; 24+ messages in thread
From: Kirill A. Shutemov @ 2018-12-31 23:02 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Andrew Morton, linux-mm

On Mon, Dec 31, 2018 at 05:42:23AM -0800, Matthew Wilcox wrote:
> It's unnecessarily hard to find out the size of a potentially huge page.
> Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).

Good idea.

Should we add page_mask() and page_shift() too?
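
For illustration, such helpers could follow the same pattern as
page_size(); a rough sketch only (page_size() is the only helper in the
posted patch, and the names and return types below are assumptions):

/* Sketch: possible companions to page_size(), not part of the patch. */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

static inline unsigned long page_mask(struct page *page)
{
	return ~(page_size(page) - 1);	/* upper bits, like PAGE_MASK */
}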

-- 
 Kirill A. Shutemov


* Re: [PATCH] mm: Introduce page_size()
@ 2019-01-01  3:27   ` Aneesh Kumar K.V
  0 siblings, 0 replies; 24+ messages in thread
From: Aneesh Kumar K.V @ 2019-01-01  3:27 UTC (permalink / raw)
  To: Matthew Wilcox, Andrew Morton, linux-mm

Matthew Wilcox <willy@infradead.org> writes:


>  static inline unsigned hstate_index_to_shift(unsigned index)
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 5411de93a363e..e920ef9927539 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -712,6 +712,12 @@ static inline void set_compound_order(struct page *page, unsigned int order)
>  	page[1].compound_order = order;
>  }
>  
> +/* Returns the number of bytes in this potentially compound page. */
> +static inline unsigned long page_size(struct page *page)
> +{
> +	return (unsigned long)PAGE_SIZE << compound_order(page);
> +}
> +


How about compound_page_size() to make it clear this is for
compound_pages? Should we make it work with Tail pages by doing
compound_head(page)?


-aneesh

* Re: [PATCH] mm: Introduce page_size()
  2019-01-01  3:27   ` Aneesh Kumar K.V
@ 2019-01-01  6:30   ` Matthew Wilcox
  2019-01-01 10:11     ` Aneesh Kumar K.V
  2019-01-01 10:15     ` Aneesh Kumar K.V
  -1 siblings, 2 replies; 24+ messages in thread
From: Matthew Wilcox @ 2019-01-01  6:30 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: Andrew Morton, linux-mm

On Tue, Jan 01, 2019 at 08:57:53AM +0530, Aneesh Kumar K.V wrote:
> Matthew Wilcox <willy@infradead.org> writes:
> > +/* Returns the number of bytes in this potentially compound page. */
> > +static inline unsigned long page_size(struct page *page)
> > +{
> > +	return (unsigned long)PAGE_SIZE << compound_order(page);
> > +}
> > +
> 
> How about compound_page_size() to make it clear this is for
> compound_pages? Should we make it work with Tail pages by doing
> compound_head(page)?

I think that's a terrible idea.  Actually, I think the whole way we handle
compound pages is terrible; we should only ever see head pages.  Doing
page cache lookups should only give us head pages.  Calling pfn_to_page()
should give us the head page.  We should only put head pages into SG lists.
Everywhere you see a struct page should only be a head page.

I know we're far from that today, and there's lots of work to be done
to get there.  But the current state of handling compound pages is awful
and confusing.

Also, page_size() isn't just for compound pages.  It works for regular
pages too.  I'd be open to putting a VM_BUG_ON(PageTail(page)) in it
to catch people who misuse it.
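
A sketch of what that check might look like (illustrative only; whether
to add it is exactly what is being discussed, and it is not part of the
posted patch):

/* Sketch: page_size() with the suggested tail-page assertion. */
static inline unsigned long page_size(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return (unsigned long)PAGE_SIZE << compound_order(page);
}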


* Re: [PATCH] mm: Introduce page_size()
  2018-12-31 23:02 ` Kirill A. Shutemov
@ 2019-01-01  6:39   ` Matthew Wilcox
  2019-01-01 20:11     ` Zi Yan
  0 siblings, 1 reply; 24+ messages in thread
From: Matthew Wilcox @ 2019-01-01  6:39 UTC (permalink / raw)
  To: Kirill A. Shutemov; +Cc: Andrew Morton, linux-mm

On Tue, Jan 01, 2019 at 02:02:22AM +0300, Kirill A. Shutemov wrote:
> On Mon, Dec 31, 2018 at 05:42:23AM -0800, Matthew Wilcox wrote:
> > It's unnecessarily hard to find out the size of a potentially huge page.
> > Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).
> 
> Good idea.
> 
> Should we add page_mask() and page_shift() too?

I'm not opposed to that at all.  I also have a patch to add compound_nr():

+/* Returns the number of pages in this potentially compound page. */
+static inline unsigned long compound_nr(struct page *page)
+{
+       return 1UL << compound_order(page);
+}

I just haven't sent it yet ;-)  It should, perhaps, be called page_count()
or nr_pages() or something.  That covers most of the remaining users of
compound_order() which look awkward.

PAGE_MASK (and its HPAGE counterparts) always confuses me because it's
a mask which returns the upper bits rather than one which returns the
lower bits.
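
(Concretely, with 4KiB pages: PAGE_MASK is ~0xfff, i.e. it keeps the
page-aligned upper bits of an address, whereas the mask for the offset
within a page would be 0xfff.)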


* Re: [PATCH] mm: Introduce page_size()
  2019-01-01  6:30   ` Matthew Wilcox
@ 2019-01-01 10:11     ` Aneesh Kumar K.V
  2019-01-02  3:14       ` Matthew Wilcox
  2019-01-01 10:15     ` Aneesh Kumar K.V
  1 sibling, 1 reply; 24+ messages in thread
From: Aneesh Kumar K.V @ 2019-01-01 10:11 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Andrew Morton, linux-mm

Matthew Wilcox <willy@infradead.org> writes:

> On Tue, Jan 01, 2019 at 08:57:53AM +0530, Aneesh Kumar K.V wrote:
>> Matthew Wilcox <willy@infradead.org> writes:
>> > +/* Returns the number of bytes in this potentially compound page. */
>> > +static inline unsigned long page_size(struct page *page)
>> > +{
>> > +	return (unsigned long)PAGE_SIZE << compound_order(page);
>> > +}
>> > +
>> 
>> How about compound_page_size() to make it clear this is for
>> compound_pages? Should we make it work with Tail pages by doing
>> compound_head(page)?
>
> I think that's a terrible idea.  Actually, I think the whole way we handle
> compound pages is terrible; we should only ever see head pages.  Doing
> page cache lookups should only give us head pages.  Calling pfn_to_page()
> should give us the head page.  We should only put head pages into SG lists.
> Everywhere you see a struct page should only be a head page.
>
> I know we're far from that today, and there's lots of work to be done
> to get there.  But the current state of handling compound pages is awful
> and confusing.
>
> Also, page_size() isn't just for compound pages.  It works for regular
> pages too.  I'd be open to putting a VM_BUG_ON(PageTail(page)) in it
> to catch people who misuse it.

Adding VM_BUG_ON is a good idea.

Thanks,
-aneesh


* Re: [PATCH] mm: Introduce page_size()
  2019-01-01  6:30   ` Matthew Wilcox
  2019-01-01 10:11     ` Aneesh Kumar K.V
@ 2019-01-01 10:15     ` Aneesh Kumar K.V
  1 sibling, 0 replies; 24+ messages in thread
From: Aneesh Kumar K.V @ 2019-01-01 10:15 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Andrew Morton, linux-mm

Matthew Wilcox <willy@infradead.org> writes:

> On Tue, Jan 01, 2019 at 08:57:53AM +0530, Aneesh Kumar K.V wrote:
>> Matthew Wilcox <willy@infradead.org> writes:
>> > +/* Returns the number of bytes in this potentially compound page. */
>> > +static inline unsigned long page_size(struct page *page)
>> > +{
>> > +	return (unsigned long)PAGE_SIZE << compound_order(page);
>> > +}
>> > +
>> 
>> How about compound_page_size() to make it clear this is for
>> compound_pages? Should we make it work with Tail pages by doing
>> compound_head(page)?
>
> I think that's a terrible idea.  Actually, I think the whole way we handle
> compound pages is terrible; we should only ever see head pages.  Doing
> page cache lookups should only give us head pages.  Calling pfn_to_page()
> should give us the head page.  We should only put head pages into SG lists.
> Everywhere you see a struct page should only be a head page.
>
> I know we're far from that today, and there's lots of work to be done
> to get there.  But the current state of handling compound pages is awful
> and confusing.

One exception is THP compound pages which can also be mapped as regular
pages in some page tables?

-aneesh


* Re: [PATCH] mm: Introduce page_size()
  2019-01-01  6:39   ` Matthew Wilcox
@ 2019-01-01 20:11     ` Zi Yan
  2019-01-02  0:58       ` Matthew Wilcox
  0 siblings, 1 reply; 24+ messages in thread
From: Zi Yan @ 2019-01-01 20:11 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Kirill A. Shutemov, Andrew Morton, linux-mm

On 1 Jan 2019, at 1:39, Matthew Wilcox wrote:

> On Tue, Jan 01, 2019 at 02:02:22AM +0300, Kirill A. Shutemov wrote:
>> On Mon, Dec 31, 2018 at 05:42:23AM -0800, Matthew Wilcox wrote:
>>> It's unnecessarily hard to find out the size of a potentially huge page.
>>> Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).
>>
>> Good idea.
>>
>> Should we add page_mask() and page_shift() too?
>
> I'm not opposed to that at all.  I also have a patch to add compound_nr():
>
> +/* Returns the number of pages in this potentially compound page. */
> +static inline unsigned long compound_nr(struct page *page)
> +{
> +       return 1UL << compound_order(page);
> +}
>
> I just haven't sent it yet ;-)  It should, perhaps, be called page_count()
> or nr_pages() or something.  That covers most of the remaining users of
> compound_order() which look awkward.

We already have hpage_nr_pages() to show the number of pages. Why do we need
another one?


--
Best Regards,
Yan Zi


* Re: [PATCH] mm: Introduce page_size()
  2019-01-01 20:11     ` Zi Yan
@ 2019-01-02  0:58       ` Matthew Wilcox
  2019-01-02  1:16         ` Zi Yan
  0 siblings, 1 reply; 24+ messages in thread
From: Matthew Wilcox @ 2019-01-02  0:58 UTC (permalink / raw)
  To: Zi Yan; +Cc: Kirill A. Shutemov, Andrew Morton, linux-mm

On Tue, Jan 01, 2019 at 03:11:04PM -0500, Zi Yan wrote:
> On 1 Jan 2019, at 1:39, Matthew Wilcox wrote:
> 
> > On Tue, Jan 01, 2019 at 02:02:22AM +0300, Kirill A. Shutemov wrote:
> >> On Mon, Dec 31, 2018 at 05:42:23AM -0800, Matthew Wilcox wrote:
> >>> It's unnecessarily hard to find out the size of a potentially huge page.
> >>> Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).
> >>
> >> Good idea.
> >>
> >> Should we add page_mask() and page_shift() too?
> >
> > I'm not opposed to that at all.  I also have a patch to add compound_nr():
> >
> > +/* Returns the number of pages in this potentially compound page. */
> > +static inline unsigned long compound_nr(struct page *page)
> > +{
> > +       return 1UL << compound_order(page);
> > +}
> >
> > I just haven't sent it yet ;-)  It should, perhaps, be called page_count()
> > or nr_pages() or something.  That covers most of the remaining users of
> > compound_order() which look awkward.
> 
> We already have hpage_nr_pages() to show the number of pages. Why do we need
> another one?

Not all compound pages are PMD sized.


* Re: [PATCH] mm: Introduce page_size()
  2019-01-02  0:58       ` Matthew Wilcox
@ 2019-01-02  1:16         ` Zi Yan
  0 siblings, 0 replies; 24+ messages in thread
From: Zi Yan @ 2019-01-02  1:16 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Kirill A. Shutemov, Andrew Morton, linux-mm

On 1 Jan 2019, at 19:58, Matthew Wilcox wrote:

> On Tue, Jan 01, 2019 at 03:11:04PM -0500, Zi Yan wrote:
>> On 1 Jan 2019, at 1:39, Matthew Wilcox wrote:
>>
>>> On Tue, Jan 01, 2019 at 02:02:22AM +0300, Kirill A. Shutemov wrote:
>>>> On Mon, Dec 31, 2018 at 05:42:23AM -0800, Matthew Wilcox wrote:
>>>>> It's unnecessarily hard to find out the size of a potentially huge page.
>>>>> Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).
>>>>
>>>> Good idea.
>>>>
>>>> Should we add page_mask() and page_shift() too?
>>>
>>> I'm not opposed to that at all.  I also have a patch to add compound_nr():
>>>
>>> +/* Returns the number of pages in this potentially compound page. */
>>> +static inline unsigned long compound_nr(struct page *page)
>>> +{
>>> +       return 1UL << compound_order(page);
>>> +}
>>>
>>> I just haven't sent it yet ;-)  It should, perhaps, be called page_count()
>>> or nr_pages() or something.  That covers most of the remaining users of
>>> compound_order() which look awkward.
>>
>> We already have hpage_nr_pages() to show the number of pages. Why do we need
>> another one?
>
> Not all compound pages are PMD sized.

Right, and THPs are also compound pages. Maybe use your compound_nr() in
hpage_nr_pages() to factor out the common code, if compound_nr() is going
to be added?
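
Something like this rough sketch, assuming compound_nr() is added (the
current hpage_nr_pages() returns HPAGE_PMD_NR for THPs; this only
illustrates the proposed factoring, it is not a tested patch):

/* Sketch: hpage_nr_pages() expressed via compound_nr(). */
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return compound_nr(page);
	return 1;
}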

--
Best Regards,
Yan Zi


* Re: [PATCH] mm: Introduce page_size()
  2019-01-01 10:11     ` Aneesh Kumar K.V
@ 2019-01-02  3:14       ` Matthew Wilcox
  2019-01-02 11:46         ` William Kucharski
  0 siblings, 1 reply; 24+ messages in thread
From: Matthew Wilcox @ 2019-01-02  3:14 UTC (permalink / raw)
  To: Aneesh Kumar K.V; +Cc: Andrew Morton, linux-mm

On Tue, Jan 01, 2019 at 03:41:00PM +0530, Aneesh Kumar K.V wrote:
> Matthew Wilcox <willy@infradead.org> writes:
> > On Tue, Jan 01, 2019 at 08:57:53AM +0530, Aneesh Kumar K.V wrote:
> >> Matthew Wilcox <willy@infradead.org> writes:
> >> > +/* Returns the number of bytes in this potentially compound page. */
> >> > +static inline unsigned long page_size(struct page *page)
> >> > +{
> >> > +	return (unsigned long)PAGE_SIZE << compound_order(page);
> >> > +}
> >> > +
> >> 
> >> How about compound_page_size() to make it clear this is for
> >> compound_pages? Should we make it work with Tail pages by doing
> >> compound_head(page)?
> >
> > I think that's a terrible idea.  Actually, I think the whole way we handle
> > compound pages is terrible; we should only ever see head pages.  Doing
> > page cache lookups should only give us head pages.  Calling pfn_to_page()
> > should give us the head page.  We should only put head pages into SG lists.
> > Everywhere you see a struct page should only be a head page.
> >
> > I know we're far from that today, and there's lots of work to be done
> > to get there.  But the current state of handling compound pages is awful
> > and confusing.
> >
> > Also, page_size() isn't just for compound pages.  It works for regular
> > pages too.  I'd be open to putting a VM_BUG_ON(PageTail(page)) in it
> > to catch people who misuse it.
> 
> Adding VM_BUG_ON is a good idea.

I'm no longer sure about that.  If someone has a tail page and asks for
page_size(page), I think they want to get PAGE_SIZE back.  Just look at
the current users in that patch; they all process page_size() number of
bytes, then move on to the next struct page.

If they somehow happen to have a tail page, then we want them to process
PAGE_SIZE bytes at a time, then move onto the next page, until they hit
a head page.  If calling page_size() on a tail page returned the size
of the entire compound page, then it would process some bytes from pages
which weren't part of this compound page.

So I think the current definition of page_size() is right.
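
As a hypothetical illustration of that calling pattern (the names here
are made up, not taken from the patch):

/*
 * Each iteration handles exactly one struct page.  With the posted
 * definition, a tail page reports PAGE_SIZE, so the caller consumes
 * one base page at a time instead of over-counting from the middle
 * of a compound page.
 */
for (i = 0; i < nr_pages; i++)
	memset(page_address(pages[i]), 0, page_size(pages[i]));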


* Re: [PATCH] mm: Introduce page_size()
  2019-01-02  3:14       ` Matthew Wilcox
@ 2019-01-02 11:46         ` William Kucharski
  2019-01-02 13:09           ` Matthew Wilcox
  0 siblings, 1 reply; 24+ messages in thread
From: William Kucharski @ 2019-01-02 11:46 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Aneesh Kumar K.V, Andrew Morton, linux-mm

It's tricky, simply because if someone doesn't know the size of their
current page, they would generally want to know what size the current
page is mapped as, based upon what is currently extant within that address
space.

So for example, assuming read-only pages, if an address space has a
PMD-sized THP mapped, it seems as if page_size() for any address within
that PMD address range should return the PMD size, as compound page
head/tail is an implementation issue, not a VM one per se.

On the other hand, if another address space has a portion of the physical
space the THP occupies mapped as a PAGESIZE page, a page_size() for an
address within that range should return PAGESIZE.

Forgive me if I'm being impossibly naive here.


* Re: [PATCH] mm: Introduce page_size()
  2019-01-02 11:46         ` William Kucharski
@ 2019-01-02 13:09           ` Matthew Wilcox
  2019-01-03 10:47             ` William Kucharski
  0 siblings, 1 reply; 24+ messages in thread
From: Matthew Wilcox @ 2019-01-02 13:09 UTC (permalink / raw)
  To: William Kucharski; +Cc: Aneesh Kumar K.V, Andrew Morton, linux-mm

On Wed, Jan 02, 2019 at 04:46:27AM -0700, William Kucharski wrote:
> It's tricky, simply because if someone doesn't know the size of their
> current page, they would generally want to know what size the current
> page is mapped as, based upon what is currently extant within that address
> space.

I'm not sure I agree with that.  It's going to depend on exactly what this
code is doing; I can definitely see there being places in the VM where we
care about how this page is currently mapped, but I think those places
are probably using the wrong interface (get_user_pages()) and should
really be using an interface which doesn't exist yet (get_user_sg()).


* Re: [PATCH] mm: Introduce page_size()
  2019-01-02 13:09           ` Matthew Wilcox
@ 2019-01-03 10:47             ` William Kucharski
  0 siblings, 0 replies; 24+ messages in thread
From: William Kucharski @ 2019-01-03 10:47 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Aneesh Kumar K.V, Andrew Morton, linux-mm



> On Jan 2, 2019, at 6:09 AM, Matthew Wilcox <willy@infradead.org> wrote:
> 
> I'm not sure I agree with that.  It's going to depend on exactly what this
> code is doing; I can definitely see there being places in the VM where we
> care about how this page is currently mapped, but I think those places
> are probably using the wrong interface (get_user_pages()) and should
> really be using an interface which doesn't exist yet (get_user_sg()).

Fair enough; I also agree the VM_BUG_ON for tail pages might be a good safety
measure, at least to see if anyone ends up calling page_size() that way at present.


* Re: [PATCH] mm: Introduce page_size()
  2019-05-23 21:44         ` Matthew Wilcox
@ 2019-05-24  6:34           ` Christoph Hellwig
  0 siblings, 0 replies; 24+ messages in thread
From: Christoph Hellwig @ 2019-05-24  6:34 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Andrew Morton, Kirill Tkhai, linux-mm

On Thu, May 23, 2019 at 02:44:02PM -0700, Matthew Wilcox wrote:
> > I think you'll find that PAGE_SIZE is unsigned long on all
> > architectures.
> 
> arch/openrisc/include/asm/page.h:#define PAGE_SIZE       (1 << PAGE_SHIFT)

Well, the whole context is:

#ifdef __ASSEMBLY__
#define PAGE_SIZE       (1 << PAGE_SHIFT)
#else
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#endif

Which reminds me that there is absolutely no point in letting
architectures even define this.

Add a Kconfig PAGE_SHIFT symbol, and let common code define
PAGE_SHIFT/PAGE_SIZE/PAGE_MASK.
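
A rough sketch of what those common definitions might look like,
assuming an architecture-selected CONFIG_PAGE_SHIFT (no such Kconfig
symbol exists in the tree this thread is discussing):

/* Sketch: shared page-size definitions driven by a Kconfig value. */
#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
#ifdef __ASSEMBLY__
#define PAGE_SIZE	(1 << PAGE_SHIFT)
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK	(~(PAGE_SIZE - 1))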



* Re: [PATCH] mm: Introduce page_size()
  2019-05-23 21:33       ` Andrew Morton
@ 2019-05-23 21:44         ` Matthew Wilcox
  2019-05-24  6:34           ` Christoph Hellwig
  0 siblings, 1 reply; 24+ messages in thread
From: Matthew Wilcox @ 2019-05-23 21:44 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Kirill Tkhai, linux-mm

On Thu, May 23, 2019 at 02:33:15PM -0700, Andrew Morton wrote:
> On Wed, 22 May 2019 18:55:11 -0700 Matthew Wilcox <willy@infradead.org> wrote:
> 
> > > > +	return (unsigned long)PAGE_SIZE << compound_order(page);
> > > > + }
> > > 
> > > Also, I suspect the cast here is unneeded.  Architectures used to
> > differ in the type of PAGE_SIZE but please tell me that's been fixed
> > for a long time...
> > 
> > It's an unsigned int for most, if not all architectures.  For, eg,
> > PowerPC, a PUD page is larger than 4GB.  So let's just include the cast
> > and not have to worry about undefined semantics screwing us over.
> 
> I think you'll find that PAGE_SIZE is unsigned long on all
> architectures.

arch/openrisc/include/asm/page.h:#define PAGE_SIZE       (1 << PAGE_SHIFT)

The others are a miscellany of different defines, but I think you're
right for every other architecture.



* Re: [PATCH] mm: Introduce page_size()
  2019-05-23  1:55     ` Matthew Wilcox
@ 2019-05-23 21:33       ` Andrew Morton
  2019-05-23 21:44         ` Matthew Wilcox
  0 siblings, 1 reply; 24+ messages in thread
From: Andrew Morton @ 2019-05-23 21:33 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Kirill Tkhai, linux-mm

On Wed, 22 May 2019 18:55:11 -0700 Matthew Wilcox <willy@infradead.org> wrote:

> > > +	return (unsigned long)PAGE_SIZE << compound_order(page);
> > > + }
> > 
> > Also, I suspect the cast here is unneeded.  Architectures used to
> > differ in the type of PAGE_SIZE but please tell me that's been fixed
> > for a long time...
> 
> It's an unsigned int for most, if not all architectures.  For, eg,
> PowerPC, a PUD page is larger than 4GB.  So let's just include the cast
> and not have to worry about undefined semantics screwing us over.

I think you'll find that PAGE_SIZE is unsigned long on all
architectures.



* Re: [PATCH] mm: Introduce page_size()
  2019-05-22 20:03   ` Andrew Morton
@ 2019-05-23  1:55     ` Matthew Wilcox
  2019-05-23 21:33       ` Andrew Morton
  0 siblings, 1 reply; 24+ messages in thread
From: Matthew Wilcox @ 2019-05-23  1:55 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Kirill Tkhai, linux-mm

On Wed, May 22, 2019 at 01:03:18PM -0700, Andrew Morton wrote:
> On Mon, 13 May 2019 15:43:08 +0300 Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
> > > +/*
> > > + * Returns the number of bytes in this potentially compound page.
> > > + * Must be called with the head page, not a tail page.
> > > + */
> > > +static inline unsigned long page_size(struct page *page)
> > > +{
> > 
> > Maybe we should underline commented head page limitation with VM_BUG_ON()?
> 
> VM_WARN_ONCE() if poss, please.
> 
> The code bloatage from that is likely to be distressing.  Perhaps
> adding an out-of-line compound_order_head_only() for this reason would
> help.  In which case, just uninline the whole thing...

I think this is unnecessary.  Nobody's currently calling the code it
replaces on a tail page, and the plan is to reduce or eliminate the
number of places where parts of the system see tail pages.  I strongly
oppose adding any kind of check here.

> > +	return (unsigned long)PAGE_SIZE << compound_order(page);
> > + }
> 
> Also, I suspect the cast here is unneeded.  Architectures used to
> differ in the type of PAGE_SIZE but please tell me that's been fixed
> for a long time...

It's an unsigned int for most, if not all architectures.  For, eg,
PowerPC, a PUD page is larger than 4GB.  So let's just include the cast
and not have to worry about undefined semantics screwing us over.
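
Concretely (hypothetical numbers, not taken from the patch): with 4KiB
base pages, an order-22 compound page is 16GiB, so the shift overflows a
32-bit PAGE_SIZE before it is ever widened; with a signed definition such
as (1 << PAGE_SHIFT), that overflow is undefined behaviour as well.

/* Sketch: why the explicit cast matters for very large orders. */
unsigned int psize = 1U << 12;                   /* a 32-bit PAGE_SIZE     */
unsigned long bad = psize << 22;                 /* wraps to 0 in 32 bits  */
unsigned long good = (unsigned long)psize << 22; /* 1UL << 34 = 16GiB      */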



* Re: [PATCH] mm: Introduce page_size()
  2019-05-13 12:43 ` Kirill Tkhai
  2019-05-14 11:53   ` William Kucharski
@ 2019-05-22 20:03   ` Andrew Morton
  2019-05-23  1:55     ` Matthew Wilcox
  1 sibling, 1 reply; 24+ messages in thread
From: Andrew Morton @ 2019-05-22 20:03 UTC (permalink / raw)
  To: Kirill Tkhai; +Cc: Matthew Wilcox, linux-mm

On Mon, 13 May 2019 15:43:08 +0300 Kirill Tkhai <ktkhai@virtuozzo.com> wrote:

> > +/*
> > + * Returns the number of bytes in this potentially compound page.
> > + * Must be called with the head page, not a tail page.
> > + */
> > +static inline unsigned long page_size(struct page *page)
> > +{
> 
> Maybe we should underline commented head page limitation with VM_BUG_ON()?

VM_WARN_ONCE() if poss, please.

The code bloatage from that is likely to be distressing.  Perhaps
adding an out-of-line compound_order_head_only() for this reason would
help.  In which case, just uninline the whole thing...

> +	return (unsigned long)PAGE_SIZE << compound_order(page);
> + }

Also, I suspect the cast here is unneeded.  Architectures used to
differ in the type of PAGE_SIZE but please tell me that's been fixed
for a long time...



* Re: [PATCH] mm: Introduce page_size()
  2019-05-13 12:43 ` Kirill Tkhai
@ 2019-05-14 11:53   ` William Kucharski
  2019-05-22 20:03   ` Andrew Morton
  1 sibling, 0 replies; 24+ messages in thread
From: William Kucharski @ 2019-05-14 11:53 UTC (permalink / raw)
  To: Kirill Tkhai; +Cc: Matthew Wilcox, linux-mm



> On May 13, 2019, at 6:43 AM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
> 
> Hi, Matthew,
> 
> Maybe we should underline commented head page limitation with VM_BUG_ON()?
> 
> Kirill

I like that idea as well; even if all the present callers are well-vetted, it's
inevitable someone will come along and call page_size() without reading the
head-only comment first.




* Re: [PATCH] mm: Introduce page_size()
  2019-05-10 18:12 Matthew Wilcox
  2019-05-13 10:56 ` Michal Hocko
@ 2019-05-13 12:43 ` Kirill Tkhai
  2019-05-14 11:53   ` William Kucharski
  2019-05-22 20:03   ` Andrew Morton
  1 sibling, 2 replies; 24+ messages in thread
From: Kirill Tkhai @ 2019-05-13 12:43 UTC (permalink / raw)
  To: Matthew Wilcox, linux-mm

Hi, Matthew,

On 10.05.2019 21:12, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> It's unnecessarily hard to find out the size of a potentially large page.
> Replace 'PAGE_SIZE << compound_order(page)' with 'page_size(page)'.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  arch/arm/mm/flush.c                           | 3 +--
>  arch/arm64/mm/flush.c                         | 3 +--
>  arch/ia64/mm/init.c                           | 2 +-
>  drivers/staging/android/ion/ion_system_heap.c | 4 ++--
>  drivers/target/tcm_fc/tfc_io.c                | 3 +--
>  fs/io_uring.c                                 | 2 +-
>  include/linux/hugetlb.h                       | 2 +-
>  include/linux/mm.h                            | 9 +++++++++
>  lib/iov_iter.c                                | 2 +-
>  mm/kasan/common.c                             | 8 +++-----
>  mm/nommu.c                                    | 2 +-
>  mm/page_vma_mapped.c                          | 3 +--
>  mm/rmap.c                                     | 6 ++----
>  mm/slob.c                                     | 2 +-
>  mm/slub.c                                     | 4 ++--
>  net/xdp/xsk.c                                 | 2 +-
>  16 files changed, 29 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
> index 58469623b015..c68a120de28b 100644
> --- a/arch/arm/mm/flush.c
> +++ b/arch/arm/mm/flush.c
> @@ -207,8 +207,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
>  	 * coherent with the kernels mapping.
>  	 */
>  	if (!PageHighMem(page)) {
> -		size_t page_size = PAGE_SIZE << compound_order(page);
> -		__cpuc_flush_dcache_area(page_address(page), page_size);
> +		__cpuc_flush_dcache_area(page_address(page), page_size(page));
>  	} else {
>  		unsigned long i;
>  		if (cache_is_vipt_nonaliasing()) {
> diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
> index 5c9073bace83..280fdbc3bfa5 100644
> --- a/arch/arm64/mm/flush.c
> +++ b/arch/arm64/mm/flush.c
> @@ -67,8 +67,7 @@ void __sync_icache_dcache(pte_t pte)
>  	struct page *page = pte_page(pte);
>  
>  	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
> -		sync_icache_aliases(page_address(page),
> -				    PAGE_SIZE << compound_order(page));
> +		sync_icache_aliases(page_address(page), page_size(page));
>  }
>  EXPORT_SYMBOL_GPL(__sync_icache_dcache);
>  
> diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
> index d28e29103bdb..cc4061cd9899 100644
> --- a/arch/ia64/mm/init.c
> +++ b/arch/ia64/mm/init.c
> @@ -63,7 +63,7 @@ __ia64_sync_icache_dcache (pte_t pte)
>  	if (test_bit(PG_arch_1, &page->flags))
>  		return;				/* i-cache is already coherent with d-cache */
>  
> -	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
> +	flush_icache_range(addr, addr + page_size(page));
>  	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
>  }
>  
> diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
> index aa8d8425be25..b83a1d16bd89 100644
> --- a/drivers/staging/android/ion/ion_system_heap.c
> +++ b/drivers/staging/android/ion/ion_system_heap.c
> @@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
>  		if (!page)
>  			goto free_pages;
>  		list_add_tail(&page->lru, &pages);
> -		size_remaining -= PAGE_SIZE << compound_order(page);
> +		size_remaining -= page_size(page);
>  		max_order = compound_order(page);
>  		i++;
>  	}
> @@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
>  
>  	sg = table->sgl;
>  	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
> -		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
> +		sg_set_page(sg, page, page_size(page), 0);
>  		sg = sg_next(sg);
>  		list_del(&page->lru);
>  	}
> diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
> index 1eb1f58e00e4..83c1ec65dbcc 100644
> --- a/drivers/target/tcm_fc/tfc_io.c
> +++ b/drivers/target/tcm_fc/tfc_io.c
> @@ -148,8 +148,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
>  					   page, off_in_page, tlen);
>  			fr_len(fp) += tlen;
>  			fp_skb(fp)->data_len += tlen;
> -			fp_skb(fp)->truesize +=
> -					PAGE_SIZE << compound_order(page);
> +			fp_skb(fp)->truesize += page_size(page);
>  		} else {
>  			BUG_ON(!page);
>  			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index fdc18321d70c..2c37da095517 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2891,7 +2891,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
>  	}
>  
>  	page = virt_to_head_page(ptr);
> -	if (sz > (PAGE_SIZE << compound_order(page)))
> +	if (sz > page_size(page))
>  		return -EINVAL;
>  
>  	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index edf476c8cfb9..2e909072a41f 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -472,7 +472,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
>  static inline struct hstate *page_hstate(struct page *page)
>  {
>  	VM_BUG_ON_PAGE(!PageHuge(page), page);
> -	return size_to_hstate(PAGE_SIZE << compound_order(page));
> +	return size_to_hstate(page_size(page));
>  }
>  
>  static inline unsigned hstate_index_to_shift(unsigned index)
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 0e8834ac32b7..0208f77bab63 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -772,6 +772,15 @@ static inline void set_compound_order(struct page *page, unsigned int order)
>  	page[1].compound_order = order;
>  }
>  
> +/*
> + * Returns the number of bytes in this potentially compound page.
> + * Must be called with the head page, not a tail page.
> + */
> +static inline unsigned long page_size(struct page *page)
> +{

Maybe we should underline commented head page limitation with VM_BUG_ON()?

Kirill



* Re: [PATCH] mm: Introduce page_size()
  2019-05-10 18:12 Matthew Wilcox
@ 2019-05-13 10:56 ` Michal Hocko
  2019-05-13 12:43 ` Kirill Tkhai
  1 sibling, 0 replies; 24+ messages in thread
From: Michal Hocko @ 2019-05-13 10:56 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: linux-mm

On Fri 10-05-19 11:12:42, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> It's unnecessarily hard to find out the size of a potentially large page.
> Replace 'PAGE_SIZE << compound_order(page)' with 'page_size(page)'.

I like the new helper. The conversion looks like something for
coccinelle.

> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

I haven't checked for other potential places to convert but the ones in
the patch look ok to me.

Acked-by: Michal Hocko <mhocko@suse.com>

> ---
>  arch/arm/mm/flush.c                           | 3 +--
>  arch/arm64/mm/flush.c                         | 3 +--
>  arch/ia64/mm/init.c                           | 2 +-
>  drivers/staging/android/ion/ion_system_heap.c | 4 ++--
>  drivers/target/tcm_fc/tfc_io.c                | 3 +--
>  fs/io_uring.c                                 | 2 +-
>  include/linux/hugetlb.h                       | 2 +-
>  include/linux/mm.h                            | 9 +++++++++
>  lib/iov_iter.c                                | 2 +-
>  mm/kasan/common.c                             | 8 +++-----
>  mm/nommu.c                                    | 2 +-
>  mm/page_vma_mapped.c                          | 3 +--
>  mm/rmap.c                                     | 6 ++----
>  mm/slob.c                                     | 2 +-
>  mm/slub.c                                     | 4 ++--
>  net/xdp/xsk.c                                 | 2 +-
>  16 files changed, 29 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
> index 58469623b015..c68a120de28b 100644
> --- a/arch/arm/mm/flush.c
> +++ b/arch/arm/mm/flush.c
> @@ -207,8 +207,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
>  	 * coherent with the kernels mapping.
>  	 */
>  	if (!PageHighMem(page)) {
> -		size_t page_size = PAGE_SIZE << compound_order(page);
> -		__cpuc_flush_dcache_area(page_address(page), page_size);
> +		__cpuc_flush_dcache_area(page_address(page), page_size(page));
>  	} else {
>  		unsigned long i;
>  		if (cache_is_vipt_nonaliasing()) {
> diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
> index 5c9073bace83..280fdbc3bfa5 100644
> --- a/arch/arm64/mm/flush.c
> +++ b/arch/arm64/mm/flush.c
> @@ -67,8 +67,7 @@ void __sync_icache_dcache(pte_t pte)
>  	struct page *page = pte_page(pte);
>  
>  	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
> -		sync_icache_aliases(page_address(page),
> -				    PAGE_SIZE << compound_order(page));
> +		sync_icache_aliases(page_address(page), page_size(page));
>  }
>  EXPORT_SYMBOL_GPL(__sync_icache_dcache);
>  
> diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
> index d28e29103bdb..cc4061cd9899 100644
> --- a/arch/ia64/mm/init.c
> +++ b/arch/ia64/mm/init.c
> @@ -63,7 +63,7 @@ __ia64_sync_icache_dcache (pte_t pte)
>  	if (test_bit(PG_arch_1, &page->flags))
>  		return;				/* i-cache is already coherent with d-cache */
>  
> -	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
> +	flush_icache_range(addr, addr + page_size(page));
>  	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
>  }
>  
> diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
> index aa8d8425be25..b83a1d16bd89 100644
> --- a/drivers/staging/android/ion/ion_system_heap.c
> +++ b/drivers/staging/android/ion/ion_system_heap.c
> @@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
>  		if (!page)
>  			goto free_pages;
>  		list_add_tail(&page->lru, &pages);
> -		size_remaining -= PAGE_SIZE << compound_order(page);
> +		size_remaining -= page_size(page);
>  		max_order = compound_order(page);
>  		i++;
>  	}
> @@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
>  
>  	sg = table->sgl;
>  	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
> -		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
> +		sg_set_page(sg, page, page_size(page), 0);
>  		sg = sg_next(sg);
>  		list_del(&page->lru);
>  	}
> diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
> index 1eb1f58e00e4..83c1ec65dbcc 100644
> --- a/drivers/target/tcm_fc/tfc_io.c
> +++ b/drivers/target/tcm_fc/tfc_io.c
> @@ -148,8 +148,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
>  					   page, off_in_page, tlen);
>  			fr_len(fp) += tlen;
>  			fp_skb(fp)->data_len += tlen;
> -			fp_skb(fp)->truesize +=
> -					PAGE_SIZE << compound_order(page);
> +			fp_skb(fp)->truesize += page_size(page);
>  		} else {
>  			BUG_ON(!page);
>  			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index fdc18321d70c..2c37da095517 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2891,7 +2891,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
>  	}
>  
>  	page = virt_to_head_page(ptr);
> -	if (sz > (PAGE_SIZE << compound_order(page)))
> +	if (sz > page_size(page))
>  		return -EINVAL;
>  
>  	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index edf476c8cfb9..2e909072a41f 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -472,7 +472,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
>  static inline struct hstate *page_hstate(struct page *page)
>  {
>  	VM_BUG_ON_PAGE(!PageHuge(page), page);
> -	return size_to_hstate(PAGE_SIZE << compound_order(page));
> +	return size_to_hstate(page_size(page));
>  }
>  
>  static inline unsigned hstate_index_to_shift(unsigned index)
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 0e8834ac32b7..0208f77bab63 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -772,6 +772,15 @@ static inline void set_compound_order(struct page *page, unsigned int order)
>  	page[1].compound_order = order;
>  }
>  
> +/*
> + * Returns the number of bytes in this potentially compound page.
> + * Must be called with the head page, not a tail page.
> + */
> +static inline unsigned long page_size(struct page *page)
> +{
> +	return (unsigned long)PAGE_SIZE << compound_order(page);
> +}
> +
>  void free_compound_page(struct page *page);
>  
>  #ifdef CONFIG_MMU
> diff --git a/lib/iov_iter.c b/lib/iov_iter.c
> index f74fa832f3aa..d4349c9d0c7e 100644
> --- a/lib/iov_iter.c
> +++ b/lib/iov_iter.c
> @@ -877,7 +877,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
>  	head = compound_head(page);
>  	v += (page - head) << PAGE_SHIFT;
>  
> -	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
> +	if (likely(n <= v && v <= page_size(head)))
>  		return true;
>  	WARN_ON(1);
>  	return false;
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 36afcf64e016..dd1d3d88ac9e 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -323,8 +323,7 @@ void kasan_poison_slab(struct page *page)
>  
>  	for (i = 0; i < (1 << compound_order(page)); i++)
>  		page_kasan_tag_reset(page + i);
> -	kasan_poison_shadow(page_address(page),
> -			PAGE_SIZE << compound_order(page),
> +	kasan_poison_shadow(page_address(page), page_size(page),
>  			KASAN_KMALLOC_REDZONE);
>  }
>  
> @@ -520,7 +519,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
>  	page = virt_to_page(ptr);
>  	redzone_start = round_up((unsigned long)(ptr + size),
>  				KASAN_SHADOW_SCALE_SIZE);
> -	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
> +	redzone_end = (unsigned long)ptr + page_size(page);
>  
>  	kasan_unpoison_shadow(ptr, size);
>  	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
> @@ -556,8 +555,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
>  			kasan_report_invalid_free(ptr, ip);
>  			return;
>  		}
> -		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
> -				KASAN_FREE_PAGE);
> +		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
>  	} else {
>  		__kasan_slab_free(page->slab_cache, ptr, ip, false);
>  	}
> diff --git a/mm/nommu.c b/mm/nommu.c
> index b492fd1fcf9f..6dbd5251b366 100644
> --- a/mm/nommu.c
> +++ b/mm/nommu.c
> @@ -107,7 +107,7 @@ unsigned int kobjsize(const void *objp)
>  	 * The ksize() function is only guaranteed to work for pointers
>  	 * returned by kmalloc(). So handle arbitrary pointers here.
>  	 */
> -	return PAGE_SIZE << compound_order(page);
> +	return page_size(page);
>  }
>  
>  static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index 11df03e71288..eff4b4520c8d 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  
>  	if (unlikely(PageHuge(pvmw->page))) {
>  		/* when pud is not present, pte will be NULL */
> -		pvmw->pte = huge_pte_offset(mm, pvmw->address,
> -					    PAGE_SIZE << compound_order(page));
> +		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
>  		if (!pvmw->pte)
>  			return false;
>  
> diff --git a/mm/rmap.c b/mm/rmap.c
> index e5dfe2ae6b0d..09ce05c481fc 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -898,8 +898,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
>  	 */
>  	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
>  				0, vma, vma->vm_mm, address,
> -				min(vma->vm_end, address +
> -				    (PAGE_SIZE << compound_order(page))));
> +				min(vma->vm_end, address + page_size(page)));
>  	mmu_notifier_invalidate_range_start(&range);
>  
>  	while (page_vma_mapped_walk(&pvmw)) {
> @@ -1374,8 +1373,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
>  	 */
>  	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
>  				address,
> -				min(vma->vm_end, address +
> -				    (PAGE_SIZE << compound_order(page))));
> +				min(vma->vm_end, address + page_size(page)));
>  	if (PageHuge(page)) {
>  		/*
>  		 * If sharing is possible, start and end will be adjusted
> diff --git a/mm/slob.c b/mm/slob.c
> index 510f0941d032..e7104d1ce92b 100644
> --- a/mm/slob.c
> +++ b/mm/slob.c
> @@ -539,7 +539,7 @@ size_t ksize(const void *block)
>  
>  	sp = virt_to_page(block);
>  	if (unlikely(!PageSlab(sp)))
> -		return PAGE_SIZE << compound_order(sp);
> +		return page_size(sp);
>  
>  	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
>  	m = (unsigned int *)(block - align);
> diff --git a/mm/slub.c b/mm/slub.c
> index 51453216a1ed..fe2098f95e05 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
>  		return 1;
>  
>  	start = page_address(page);
> -	length = PAGE_SIZE << compound_order(page);
> +	length = page_size(page);
>  	end = start + length;
>  	remainder = length % s->size;
>  	if (!remainder)
> @@ -3912,7 +3912,7 @@ static size_t __ksize(const void *object)
>  
>  	if (unlikely(!PageSlab(page))) {
>  		WARN_ON(!PageCompound(page));
> -		return PAGE_SIZE << compound_order(page);
> +		return page_size(page);
>  	}
>  
>  	return slab_ksize(page->slab_cache);
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index a14e8864e4fa..1e7f5dcaefad 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -685,7 +685,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
>  	/* Matches the smp_wmb() in xsk_init_queue */
>  	smp_rmb();
>  	qpg = virt_to_head_page(q->ring);
> -	if (size > (PAGE_SIZE << compound_order(qpg)))
> +	if (size > page_size(qpg))
>  		return -EINVAL;
>  
>  	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
> -- 
> 2.20.1

-- 
Michal Hocko
SUSE Labs



* [PATCH] mm: Introduce page_size()
@ 2019-05-10 18:12 Matthew Wilcox
  2019-05-13 10:56 ` Michal Hocko
  2019-05-13 12:43 ` Kirill Tkhai
  0 siblings, 2 replies; 24+ messages in thread
From: Matthew Wilcox @ 2019-05-10 18:12 UTC (permalink / raw)
  To: linux-mm; +Cc: Matthew Wilcox (Oracle)

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

It's unnecessarily hard to find out the size of a potentially large page.
Replace 'PAGE_SIZE << compound_order(page)' with 'page_size(page)'.
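
[Editor's illustration, not part of the patch: a tiny userspace sketch of what the
conversion buys at a call site. The struct page and compound_order() below are
simplified mocks, and a 4 KiB base page is assumed; only the shape of page_size()
mirrors the helper added to include/linux/mm.h in this patch.]

	#include <stdio.h>

	#define PAGE_SIZE	4096UL	/* assumed 4 KiB base page for the example */

	/* Mock: the real kernel stores the order in page[1].compound_order. */
	struct page {
		unsigned int order;
	};

	static inline unsigned int compound_order(struct page *page)
	{
		return page->order;
	}

	/* Same shape as the helper this patch adds to include/linux/mm.h. */
	static inline unsigned long page_size(struct page *page)
	{
		return (unsigned long)PAGE_SIZE << compound_order(page);
	}

	int main(void)
	{
		struct page thp = { .order = 9 };	/* e.g. a 2 MiB THP on x86-64 */

		/* Before: every caller open-codes the shift. */
		unsigned long old_way = PAGE_SIZE << compound_order(&thp);

		/* After: the intent is visible at the call site. */
		unsigned long new_way = page_size(&thp);

		printf("old=%lu new=%lu\n", old_way, new_way);	/* both 2097152 */
		return 0;
	}

[Both expressions compute the same value; the helper just names the intent and
keeps the unsigned long promotion in one place instead of at every call site.]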

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 arch/arm/mm/flush.c                           | 3 +--
 arch/arm64/mm/flush.c                         | 3 +--
 arch/ia64/mm/init.c                           | 2 +-
 drivers/staging/android/ion/ion_system_heap.c | 4 ++--
 drivers/target/tcm_fc/tfc_io.c                | 3 +--
 fs/io_uring.c                                 | 2 +-
 include/linux/hugetlb.h                       | 2 +-
 include/linux/mm.h                            | 9 +++++++++
 lib/iov_iter.c                                | 2 +-
 mm/kasan/common.c                             | 8 +++-----
 mm/nommu.c                                    | 2 +-
 mm/page_vma_mapped.c                          | 3 +--
 mm/rmap.c                                     | 6 ++----
 mm/slob.c                                     | 2 +-
 mm/slub.c                                     | 4 ++--
 net/xdp/xsk.c                                 | 2 +-
 16 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 58469623b015..c68a120de28b 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -207,8 +207,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 5c9073bace83..280fdbc3bfa5 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -67,8 +67,7 @@ void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d28e29103bdb..cc4061cd9899 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -63,7 +63,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index aa8d8425be25..b83a1d16bd89 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1eb1f58e00e4..83c1ec65dbcc 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -148,8 +148,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fdc18321d70c..2c37da095517 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2891,7 +2891,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edf476c8cfb9..2e909072a41f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -472,7 +472,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0e8834ac32b7..0208f77bab63 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -772,6 +772,15 @@ static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }
 
+/*
+ * Returns the number of bytes in this potentially compound page.
+ * Must be called with the head page, not a tail page.
+ */
+static inline unsigned long page_size(struct page *page)
+{
+	return (unsigned long)PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f74fa832f3aa..d4349c9d0c7e 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -877,7 +877,7 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 	head = compound_head(page);
 	v += (page - head) << PAGE_SHIFT;
 
-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= page_size(head)))
 		return true;
 	WARN_ON(1);
 	return false;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 36afcf64e016..dd1d3d88ac9e 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -323,8 +323,7 @@ void kasan_poison_slab(struct page *page)
 
 	for (i = 0; i < (1 << compound_order(page)); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
+	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
 }
 
@@ -520,7 +519,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -556,8 +555,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
diff --git a/mm/nommu.c b/mm/nommu.c
index b492fd1fcf9f..6dbd5251b366 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -107,7 +107,7 @@ unsigned int kobjsize(const void *objp)
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
 
 static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 11df03e71288..eff4b4520c8d 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index e5dfe2ae6b0d..09ce05c481fc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -898,8 +898,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1374,8 +1373,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
diff --git a/mm/slob.c b/mm/slob.c
index 510f0941d032..e7104d1ce92b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -539,7 +539,7 @@ size_t ksize(const void *block)
 
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);
diff --git a/mm/slub.c b/mm/slub.c
index 51453216a1ed..fe2098f95e05 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -3912,7 +3912,7 @@ static size_t __ksize(const void *object)
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a14e8864e4fa..1e7f5dcaefad 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -685,7 +685,7 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;
 
 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
-- 
2.20.1



end of thread, other threads:[~2019-05-24  6:34 UTC | newest]

Thread overview: 24+ messages
2018-12-31 13:42 [PATCH] mm: Introduce page_size() Matthew Wilcox
2018-12-31 23:02 ` Kirill A. Shutemov
2019-01-01  6:39   ` Matthew Wilcox
2019-01-01 20:11     ` Zi Yan
2019-01-02  0:58       ` Matthew Wilcox
2019-01-02  1:16         ` Zi Yan
2019-01-01  3:27 ` Aneesh Kumar K.V
2019-01-01  3:27   ` Aneesh Kumar K.V
2019-01-01  6:30   ` Matthew Wilcox
2019-01-01 10:11     ` Aneesh Kumar K.V
2019-01-02  3:14       ` Matthew Wilcox
2019-01-02 11:46         ` William Kucharski
2019-01-02 13:09           ` Matthew Wilcox
2019-01-03 10:47             ` William Kucharski
2019-01-01 10:15     ` Aneesh Kumar K.V
2019-05-10 18:12 Matthew Wilcox
2019-05-13 10:56 ` Michal Hocko
2019-05-13 12:43 ` Kirill Tkhai
2019-05-14 11:53   ` William Kucharski
2019-05-22 20:03   ` Andrew Morton
2019-05-23  1:55     ` Matthew Wilcox
2019-05-23 21:33       ` Andrew Morton
2019-05-23 21:44         ` Matthew Wilcox
2019-05-24  6:34           ` Christoph Hellwig
