* [net-next PATCH] page_pool: Refactor page_pool to enable fragmenting after allocation
From: Alexander Duyck @ 2022-01-24 21:23 UTC
  To: netdev
  Cc: alexander.duyck, hawk, ilias.apalodimas, davem, kuba, alexanderduyck

From: Alexander Duyck <alexanderduyck@fb.com>

This change is meant to permit a driver to perform "fragmenting" of the
page from within the driver instead of the current model which requires
pre-partitioning the page. The main motivation behind this is to support
use cases where the page will be split up by the driver after DMA instead
of before.

With this change it becomes possible to start using page pool to replace
some of the existing use cases where multiple references were being used
for a single page, but the number needed was unknown as the size could be
dynamic.

For example, with this code it would be possible to do something like
the following to handle allocation:
  page = page_pool_alloc_pages();
  if (!page)
    return NULL;
  page_pool_fragment_page(page, DRIVER_PAGECNT_BIAS_MAX);
  rx_buf->page = page;
  rx_buf->pagecnt_bias = DRIVER_PAGECNT_BIAS_MAX;

Then we would process a received buffer by handling it with:
  rx_buf->pagecnt_bias--;

Once the page has been fully consumed we could then flush the remaining
instances with:
  if (page_pool_defrag_page(page, rx_buf->pagecnt_bias))
    continue;
  page_pool_put_defragged_page(pool, page, -1, !!budget);

The general idea is that we want to have the ability to allocate a page
with excess fragment count and then trim off the unneeded fragments.
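
Putting those pieces together, a minimal end-to-end sketch might look
like the following (drv_rx_buf, drv_alloc_rx_page, drv_free_rx_page and
DRIVER_PAGECNT_BIAS_MAX are illustrative driver-side names, not part of
the page pool API):

  struct drv_rx_buf {
    struct page *page;
    long pagecnt_bias;
  };

  /* allocation: hand the page the maximum fragment count up front */
  static struct page *drv_alloc_rx_page(struct page_pool *pool,
                                        struct drv_rx_buf *rx_buf)
  {
    struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

    if (!page)
      return NULL;

    page_pool_fragment_page(page, DRIVER_PAGECNT_BIAS_MAX);
    rx_buf->page = page;
    rx_buf->pagecnt_bias = DRIVER_PAGECNT_BIAS_MAX;
    return page;
  }

  /* teardown: trim off the fragments the driver never handed out */
  static void drv_free_rx_page(struct page_pool *pool,
                               struct drv_rx_buf *rx_buf, int budget)
  {
    /* nonzero return: other fragments still outstanding elsewhere */
    if (page_pool_defrag_page(rx_buf->page, rx_buf->pagecnt_bias))
      return;

    page_pool_put_defragged_page(pool, rx_buf->page, -1, !!budget);
  }

Each received frame then only decrements rx_buf->pagecnt_bias, so the
per-packet fast path needs no atomic operations at all.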

Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
---
 include/net/page_pool.h |   71 ++++++++++++++++++++++++++++-------------------
 net/core/page_pool.c    |   24 +++++++---------
 2 files changed, 54 insertions(+), 41 deletions(-)

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 79a805542d0f..a437c0383889 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -201,8 +201,49 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 }
 #endif
 
-void page_pool_put_page(struct page_pool *pool, struct page *page,
-			unsigned int dma_sync_size, bool allow_direct);
+void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
+				  unsigned int dma_sync_size,
+				  bool allow_direct);
+
+static inline void page_pool_fragment_page(struct page *page, long nr)
+{
+	atomic_long_set(&page->pp_frag_count, nr);
+}
+
+static inline long page_pool_defrag_page(struct page *page, long nr)
+{
+	long ret;
+
+	/* If nr == pp_frag_count then we have cleared all remaining
+	 * references to the page. No need to actually overwrite it, instead
+	 * we can leave this to be overwritten by the calling function.
+	 *
+	 * The main advantage to doing this is that an atomic_read is
+	 * generally a much cheaper operation than an atomic update,
+	 * especially when dealing with a page that may be partitioned
+	 * into only 2 or 3 pieces.
+	 */
+	if (atomic_long_read(&page->pp_frag_count) == nr)
+		return 0;
+
+	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+	WARN_ON(ret < 0);
+	return ret;
+}
+
+static inline void page_pool_put_page(struct page_pool *pool,
+				      struct page *page,
+				      unsigned int dma_sync_size,
+				      bool allow_direct)
+{
+#ifdef CONFIG_PAGE_POOL
+	/* It is not the last user for the page frag case */
+	if (pool->p.flags & PP_FLAG_PAGE_FRAG && page_pool_defrag_page(page, 1))
+		return;
+
+	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
+#endif
+}
 
 /* Same as above but will try to sync the entire area pool->max_len */
 static inline void page_pool_put_full_page(struct page_pool *pool,
@@ -211,9 +252,7 @@ static inline void page_pool_put_full_page(struct page_pool *pool,
 	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
 	 */
-#ifdef CONFIG_PAGE_POOL
 	page_pool_put_page(pool, page, -1, allow_direct);
-#endif
 }
 
 /* Same as above but the caller must guarantee safe context. e.g NAPI */
@@ -243,30 +282,6 @@ static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
 		page->dma_addr_upper = upper_32_bits(addr);
 }
 
-static inline void page_pool_set_frag_count(struct page *page, long nr)
-{
-	atomic_long_set(&page->pp_frag_count, nr);
-}
-
-static inline long page_pool_atomic_sub_frag_count_return(struct page *page,
-							  long nr)
-{
-	long ret;
-
-	/* As suggested by Alexander, atomic_long_read() may cover up the
-	 * reference count errors, so avoid calling atomic_long_read() in
-	 * the cases of freeing or draining the page_frags, where we would
-	 * not expect it to match or that are slowpath anyway.
-	 */
-	if (__builtin_constant_p(nr) &&
-	    atomic_long_read(&page->pp_frag_count) == nr)
-		return 0;
-
-	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
-	WARN_ON(ret < 0);
-	return ret;
-}
-
 static inline bool is_page_pool_compiled_in(void)
 {
 #ifdef CONFIG_PAGE_POOL
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index bd62c01a2ec3..74fda40da51e 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -423,11 +423,6 @@ static __always_inline struct page *
 __page_pool_put_page(struct page_pool *pool, struct page *page,
 		     unsigned int dma_sync_size, bool allow_direct)
 {
-	/* It is not the last user for the page frag case */
-	if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
-	    page_pool_atomic_sub_frag_count_return(page, 1))
-		return NULL;
-
 	/* This allocator is optimized for the XDP mode that uses
 	 * one-frame-per-page, but have fallbacks that act like the
 	 * regular page allocator APIs.
@@ -471,8 +466,8 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	return NULL;
 }
 
-void page_pool_put_page(struct page_pool *pool, struct page *page,
-			unsigned int dma_sync_size, bool allow_direct)
+void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
+				  unsigned int dma_sync_size, bool allow_direct)
 {
 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 	if (page && !page_pool_recycle_in_ring(pool, page)) {
@@ -480,7 +475,7 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
 		page_pool_return_page(pool, page);
 	}
 }
-EXPORT_SYMBOL(page_pool_put_page);
+EXPORT_SYMBOL(page_pool_put_defragged_page);
 
 /* Caller must not use data area after call, as this function overwrites it */
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
@@ -491,6 +486,11 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 	for (i = 0; i < count; i++) {
 		struct page *page = virt_to_head_page(data[i]);
 
+		/* It is not the last user for the page frag case */
+		if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
+		    page_pool_defrag_page(page, 1))
+			continue;
+
 		page = __page_pool_put_page(pool, page, -1, false);
 		/* Approved for bulk recycling in ptr_ring cache */
 		if (page)
@@ -526,8 +526,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
 	long drain_count = BIAS_MAX - pool->frag_users;
 
 	/* Some user is still using the page frag */
-	if (likely(page_pool_atomic_sub_frag_count_return(page,
-							  drain_count)))
+	if (likely(page_pool_defrag_page(page, drain_count)))
 		return NULL;
 
 	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
@@ -548,8 +547,7 @@ static void page_pool_free_frag(struct page_pool *pool)
 
 	pool->frag_page = NULL;
 
-	if (!page ||
-	    page_pool_atomic_sub_frag_count_return(page, drain_count))
+	if (!page || page_pool_defrag_page(page, drain_count))
 		return;
 
 	page_pool_return_page(pool, page);
@@ -588,7 +586,7 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
 		pool->frag_users = 1;
 		*offset = 0;
 		pool->frag_offset = size;
-		page_pool_set_frag_count(page, BIAS_MAX);
+		page_pool_fragment_page(page, BIAS_MAX);
 		return page;
 	}
 




* Re: [net-next PATCH] page_pool: Refactor page_pool to enable fragmenting after allocation
From: Ilias Apalodimas @ 2022-01-26  7:50 UTC
  To: Alexander Duyck; +Cc: netdev, hawk, davem, kuba, alexanderduyck

Hi Alexander, 

Thanks for the patch

On Mon, Jan 24, 2022 at 01:23:04PM -0800, Alexander Duyck wrote:
> From: Alexander Duyck <alexanderduyck@fb.com>
> 
> This change is meant to permit a driver to perform "fragmenting" of the
> page from within the driver instead of the current model which requires
> pre-partitioning the page. The main motivation behind this is to support
> use cases where the page will be split up by the driver after DMA instead
> of before.
> 
> With this change it becomes possible to start using page pool to replace
> some of the existing use cases where multiple references were being used
> for a single page, but the number needed was unknown as the size could be
> dynamic.
> 

Any specific use cases you have in mind?

> For example, with this code it would be possible to do something like
> the following to handle allocation:
>   page = page_pool_alloc_pages();
>   if (!page)
>     return NULL;
>   page_pool_fragment_page(page, DRIVER_PAGECNT_BIAS_MAX);
>   rx_buf->page = page;
>   rx_buf->pagecnt_bias = DRIVER_PAGECNT_BIAS_MAX;
> 
> Then we would process a received buffer by handling it with:
>   rx_buf->pagecnt_bias--;
> 
> Once the page has been fully consumed we could then flush the remaining
> instances with:
>   if (page_pool_defrag_page(page, rx_buf->pagecnt_bias))
>     continue;
>   page_pool_put_defragged_page(pool, page, -1, !!budget);
> 
> The general idea is that we want to have the ability to allocate a page
> with excess fragment count and then trim off the unneeded fragments.
> 
> Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
> ---
>  include/net/page_pool.h |   71 ++++++++++++++++++++++++++++-------------------
>  net/core/page_pool.c    |   24 +++++++---------
>  2 files changed, 54 insertions(+), 41 deletions(-)
> 
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index 79a805542d0f..a437c0383889 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -201,8 +201,49 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
>  }
>  #endif
>  
> -void page_pool_put_page(struct page_pool *pool, struct page *page,
> -			unsigned int dma_sync_size, bool allow_direct);
> +void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
> +				  unsigned int dma_sync_size,
> +				  bool allow_direct);
> +
> +static inline void page_pool_fragment_page(struct page *page, long nr)
> +{
> +	atomic_long_set(&page->pp_frag_count, nr);
> +}
> +
> +static inline long page_pool_defrag_page(struct page *page, long nr)
> +{
> +	long ret;
> +
> +	/* If nr == pp_frag_count then we have cleared all remaining
> +	 * references to the page. No need to actually overwrite it, instead
> +	 * we can leave this to be overwritten by the calling function.
> +	 *
> +	 * The main advantage to doing this is that an atomic_read is
> +	 * generally a much cheaper operation than an atomic update,
> +	 * especially when dealing with a page that may be partitioned
> +	 * into only 2 or 3 pieces.
> +	 */
> +	if (atomic_long_read(&page->pp_frag_count) == nr)
> +		return 0;
> +
> +	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
> +	WARN_ON(ret < 0);
> +	return ret;
> +}
> +
> +static inline void page_pool_put_page(struct page_pool *pool,
> +				      struct page *page,
> +				      unsigned int dma_sync_size,
> +				      bool allow_direct)
> +{
> +#ifdef CONFIG_PAGE_POOL
> +	/* It is not the last user for the page frag case */
> +	if (pool->p.flags & PP_FLAG_PAGE_FRAG && page_pool_defrag_page(page, 1))
> +		return;
> +
> +	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
> +#endif
> +}
>  
>  /* Same as above but will try to sync the entire area pool->max_len */
>  static inline void page_pool_put_full_page(struct page_pool *pool,
> @@ -211,9 +252,7 @@ static inline void page_pool_put_full_page(struct page_pool *pool,
>  	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
>  	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.

nit, but the comment can either go away or move to the new
page_pool_put_page()

>  	 */
> -#ifdef CONFIG_PAGE_POOL
>  	page_pool_put_page(pool, page, -1, allow_direct);
> -#endif
>  }
>  
>  /* Same as above but the caller must guarantee safe context. e.g NAPI */
> @@ -243,30 +282,6 @@ static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
>  		page->dma_addr_upper = upper_32_bits(addr);
>  }
>  
> -static inline void page_pool_set_frag_count(struct page *page, long nr)
> -{
> -	atomic_long_set(&page->pp_frag_count, nr);
> -}
> -
> -static inline long page_pool_atomic_sub_frag_count_return(struct page *page,
> -							  long nr)
> -{
> -	long ret;
> -
> -	/* As suggested by Alexander, atomic_long_read() may cover up the
> -	 * reference count errors, so avoid calling atomic_long_read() in
> -	 * the cases of freeing or draining the page_frags, where we would
> -	 * not expect it to match or that are slowpath anyway.
> -	 */
> -	if (__builtin_constant_p(nr) &&
> -	    atomic_long_read(&page->pp_frag_count) == nr)
> -		return 0;
> -
> -	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
> -	WARN_ON(ret < 0);
> -	return ret;
> -}
> -
>  static inline bool is_page_pool_compiled_in(void)
>  {
>  #ifdef CONFIG_PAGE_POOL
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index bd62c01a2ec3..74fda40da51e 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -423,11 +423,6 @@ static __always_inline struct page *
>  __page_pool_put_page(struct page_pool *pool, struct page *page,
>  		     unsigned int dma_sync_size, bool allow_direct)
>  {
> -	/* It is not the last user for the page frag case */
> -	if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
> -	    page_pool_atomic_sub_frag_count_return(page, 1))
> -		return NULL;
> -
>  	/* This allocator is optimized for the XDP mode that uses
>  	 * one-frame-per-page, but have fallbacks that act like the
>  	 * regular page allocator APIs.
> @@ -471,8 +466,8 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
>  	return NULL;
>  }
>  
> -void page_pool_put_page(struct page_pool *pool, struct page *page,
> -			unsigned int dma_sync_size, bool allow_direct)
> +void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
> +				  unsigned int dma_sync_size, bool allow_direct)
>  {
>  	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
>  	if (page && !page_pool_recycle_in_ring(pool, page)) {
> @@ -480,7 +475,7 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
>  		page_pool_return_page(pool, page);
>  	}
>  }
> -EXPORT_SYMBOL(page_pool_put_page);
> +EXPORT_SYMBOL(page_pool_put_defragged_page);
>  
>  /* Caller must not use data area after call, as this function overwrites it */
>  void page_pool_put_page_bulk(struct page_pool *pool, void **data,
> @@ -491,6 +486,11 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
>  	for (i = 0; i < count; i++) {
>  		struct page *page = virt_to_head_page(data[i]);
>  
> +		/* It is not the last user for the page frag case */
> +		if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
> +		    page_pool_defrag_page(page, 1))
> +			continue;

Would it make sense to have this check in a function? Something like
page_pool_is_last_frag() or similar? Also, for readability, switch to
(pool->p.flags & PP_FLAG_PAGE_FRAG) && ...

> +
>  		page = __page_pool_put_page(pool, page, -1, false);
>  		/* Approved for bulk recycling in ptr_ring cache */
>  		if (page)
> @@ -526,8 +526,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
>  	long drain_count = BIAS_MAX - pool->frag_users;
>  
>  	/* Some user is still using the page frag */
> -	if (likely(page_pool_atomic_sub_frag_count_return(page,
> -							  drain_count)))
> +	if (likely(page_pool_defrag_page(page, drain_count)))
>  		return NULL;
>  
>  	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
> @@ -548,8 +547,7 @@ static void page_pool_free_frag(struct page_pool *pool)
>  
>  	pool->frag_page = NULL;
>  
> -	if (!page ||
> -	    page_pool_atomic_sub_frag_count_return(page, drain_count))
> +	if (!page || page_pool_defrag_page(page, drain_count))
>  		return;
>  
>  	page_pool_return_page(pool, page);
> @@ -588,7 +586,7 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
>  		pool->frag_users = 1;
>  		*offset = 0;
>  		pool->frag_offset = size;
> -		page_pool_set_frag_count(page, BIAS_MAX);
> +		page_pool_fragment_page(page, BIAS_MAX);
>  		return page;
>  	}
>  
> 
> 

Thanks!
/Ilias


* Re: [net-next PATCH] page_pool: Refactor page_pool to enable fragmenting after allocation
From: Alexander Duyck @ 2022-01-26 16:22 UTC
  To: Ilias Apalodimas
  Cc: Netdev, hawk, David Miller, Jakub Kicinski, Alexander Duyck

On Tue, Jan 25, 2022 at 11:50 PM Ilias Apalodimas
<ilias.apalodimas@linaro.org> wrote:
>
> Hi Alexander,
>
> Thanks for the patch
>
> On Mon, Jan 24, 2022 at 01:23:04PM -0800, Alexander Duyck wrote:
> > From: Alexander Duyck <alexanderduyck@fb.com>
> >
> > This change is meant to permit a driver to perform "fragmenting" of the
> > page from within the driver instead of the current model which requires
> > pre-partitioning the page. The main motivation behind this is to support
> > use cases where the page will be split up by the driver after DMA instead
> > of before.
> >
> > With this change it becomes possible to start using page pool to replace
> > some of the existing use cases where multiple references were being used
> > for a single page, but the number needed was unknown as the size could be
> > dynamic.
> >
>
> Any specific use cases you have in mind?

For example, with mlx5e we could probably do away with a number of
page_ref_inc calls and have the page pool take care of the DMA ops,
instead of what happens right now where the DMA unmapping and refcount
have to be manipulated by the driver.

The basic idea is to make it so that drivers can make better use of
page_pool instead of working around it by disabling functionality,
cheating the page count, or implementing their own recycling schemes
on top of page pool.
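
Roughly speaking (illustrative pseudo-driver code, not actual mlx5e
sources), the pattern that goes away is:

  /* today: driver inflates the page refcount and tracks it itself */
  page_ref_add(page, USHRT_MAX - 1);
  rx_buf->pagecnt_bias = USHRT_MAX;

and with this patch the pool's fragment count carries that state
instead:

  page_pool_fragment_page(page, USHRT_MAX);
  rx_buf->pagecnt_bias = USHRT_MAX;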

> > For example, with this code it would be possible to do something like
> > the following to handle allocation:
> >   page = page_pool_alloc_pages();
> >   if (!page)
> >     return NULL;
> >   page_pool_fragment_page(page, DRIVER_PAGECNT_BIAS_MAX);
> >   rx_buf->page = page;
> >   rx_buf->pagecnt_bias = DRIVER_PAGECNT_BIAS_MAX;
> >
> > Then we would process a received buffer by handling it with:
> >   rx_buf->pagecnt_bias--;
> >
> > Once the page has been fully consumed we could then flush the remaining
> > instances with:
> >   if (page_pool_defrag_page(page, rx_buf->pagecnt_bias))
> >     continue;
> >   page_pool_put_defragged_page(pool, page, -1, !!budget);
> >
> > The general idea is that we want to have the ability to allocate a page
> > with excess fragment count and then trim off the unneeded fragments.
> >
> > Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
> > ---
> >  include/net/page_pool.h |   71 ++++++++++++++++++++++++++++-------------------
> >  net/core/page_pool.c    |   24 +++++++---------
> >  2 files changed, 54 insertions(+), 41 deletions(-)
> >
> > diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> > index 79a805542d0f..a437c0383889 100644
> > --- a/include/net/page_pool.h
> > +++ b/include/net/page_pool.h
> > @@ -201,8 +201,49 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
> >  }
> >  #endif
> >
> > -void page_pool_put_page(struct page_pool *pool, struct page *page,
> > -                     unsigned int dma_sync_size, bool allow_direct);
> > +void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
> > +                               unsigned int dma_sync_size,
> > +                               bool allow_direct);
> > +
> > +static inline void page_pool_fragment_page(struct page *page, long nr)
> > +{
> > +     atomic_long_set(&page->pp_frag_count, nr);
> > +}
> > +
> > +static inline long page_pool_defrag_page(struct page *page, long nr)
> > +{
> > +     long ret;
> > +
> > +     /* If nr == pp_frag_count then we have cleared all remaining
> > +      * references to the page. No need to actually overwrite it, instead
> > +      * we can leave this to be overwritten by the calling function.
> > +      *
> > +      * The main advantage to doing this is that an atomic_read is
> > +      * generally a much cheaper operation than an atomic update,
> > +      * especially when dealing with a page that may be partitioned
> > +      * into only 2 or 3 pieces.
> > +      */
> > +     if (atomic_long_read(&page->pp_frag_count) == nr)
> > +             return 0;
> > +
> > +     ret = atomic_long_sub_return(nr, &page->pp_frag_count);
> > +     WARN_ON(ret < 0);
> > +     return ret;
> > +}
> > +
> > +static inline void page_pool_put_page(struct page_pool *pool,
> > +                                   struct page *page,
> > +                                   unsigned int dma_sync_size,
> > +                                   bool allow_direct)
> > +{
> > +#ifdef CONFIG_PAGE_POOL
> > +     /* It is not the last user for the page frag case */
> > +     if (pool->p.flags & PP_FLAG_PAGE_FRAG && page_pool_defrag_page(page, 1))
> > +             return;
> > +
> > +     page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
> > +#endif
> > +}
> >
> >  /* Same as above but will try to sync the entire area pool->max_len */
> >  static inline void page_pool_put_full_page(struct page_pool *pool,
> > @@ -211,9 +252,7 @@ static inline void page_pool_put_full_page(struct page_pool *pool,
> >       /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
> >        * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
>
> nit, but the comment can either go away or move to the new
> page_pool_put_page()

Okay, I will move the comment.

> > diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> > index bd62c01a2ec3..74fda40da51e 100644
> > --- a/net/core/page_pool.c
> > +++ b/net/core/page_pool.c
> > @@ -423,11 +423,6 @@ static __always_inline struct page *
> >  __page_pool_put_page(struct page_pool *pool, struct page *page,
> >                    unsigned int dma_sync_size, bool allow_direct)
> >  {
> > -     /* It is not the last user for the page frag case */
> > -     if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
> > -         page_pool_atomic_sub_frag_count_return(page, 1))
> > -             return NULL;
> > -
> >       /* This allocator is optimized for the XDP mode that uses
> >        * one-frame-per-page, but have fallbacks that act like the
> >        * regular page allocator APIs.
> > @@ -471,8 +466,8 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
> >       return NULL;
> >  }
> >
> > -void page_pool_put_page(struct page_pool *pool, struct page *page,
> > -                     unsigned int dma_sync_size, bool allow_direct)
> > +void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
> > +                               unsigned int dma_sync_size, bool allow_direct)
> >  {
> >       page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
> >       if (page && !page_pool_recycle_in_ring(pool, page)) {
> > @@ -480,7 +475,7 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
> >               page_pool_return_page(pool, page);
> >       }
> >  }
> > -EXPORT_SYMBOL(page_pool_put_page);
> > +EXPORT_SYMBOL(page_pool_put_defragged_page);
> >
> >  /* Caller must not use data area after call, as this function overwrites it */
> >  void page_pool_put_page_bulk(struct page_pool *pool, void **data,
> > @@ -491,6 +486,11 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
> >       for (i = 0; i < count; i++) {
> >               struct page *page = virt_to_head_page(data[i]);
> >
> > +             /* It is not the last user for the page frag case */
> > +             if (pool->p.flags & PP_FLAG_PAGE_FRAG &&
> > +                 page_pool_defrag_page(page, 1))
> > +                     continue;
>
> Would it make sense to have this check in a function? Something like
> page_pool_is_last_frag() or similar? Also, for readability, switch to
> (pool->p.flags & PP_FLAG_PAGE_FRAG) && ...

I will address the readability issue by wrapping the check in
parentheses when I move it to a function. I will likely be inverting
it anyway, so it will be: !(flags & FRAG) || (defrag_page() == 0)
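
In other words, something along these lines (untested sketch; the final
name and placement may differ):

  static inline bool page_pool_is_last_frag(struct page_pool *pool,
                                            struct page *page)
  {
    /* non-fragmented pools: the caller always holds the last reference */
    return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
           (page_pool_defrag_page(page, 1) == 0);
  }

Then both page_pool_put_page() and the bulk loop can simply do:

  if (!page_pool_is_last_frag(pool, page))
    return; /* or continue; in the bulk case */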
