All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Jesper Dangaard Brouer <hawk@kernel.org>,
	Ilias Apalodimas <ilias.apalodimas@linaro.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	netdev@vger.kernel.org, linux-mm@kvack.org,
	Shakeel Butt <shakeelb@google.com>
Subject: [PATCH v2 18/24] page_pool: Convert frag_page to frag_nmem
Date: Thu,  5 Jan 2023 21:46:25 +0000	[thread overview]
Message-ID: <20230105214631.3939268-19-willy@infradead.org> (raw)
In-Reply-To: <20230105214631.3939268-1-willy@infradead.org>

Convert the page_pool frag allocation path from struct page to struct
netmem: the pool's frag_page member becomes frag_nmem, and
page_pool_alloc_frag(), page_pool_drain_frag() and page_pool_free_frag()
now operate on netmem.  Remove page_pool_defrag_page() and
page_pool_return_page() as they have no more callers.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/net/page_pool.h | 17 ++++++---------
 net/core/page_pool.c    | 47 ++++++++++++++++++-----------------------
 2 files changed, 26 insertions(+), 38 deletions(-)

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 126c04315929..a9dae4b5f2f7 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -262,7 +262,7 @@ struct page_pool {
 
 	u32 pages_state_hold_cnt;
 	unsigned int frag_offset;
-	struct page *frag_page;
+	struct netmem *frag_nmem;
 	long frag_users;
 
 #ifdef CONFIG_PAGE_POOL_STATS
@@ -334,8 +334,8 @@ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
 	return page_pool_alloc_pages(pool, gfp);
 }
 
-struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
-				  unsigned int size, gfp_t gfp);
+struct netmem *page_pool_alloc_frag(struct page_pool *pool,
+		unsigned int *offset, unsigned int size, gfp_t gfp);
 
 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
 						    unsigned int *offset,
@@ -343,7 +343,7 @@ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
 {
 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
 
-	return page_pool_alloc_frag(pool, offset, size, gfp);
+	return netmem_page(page_pool_alloc_frag(pool, offset, size, gfp));
 }
 
 /* get the stored dma direction. A driver might decide to treat this locally and
@@ -399,9 +399,9 @@ void page_pool_put_defragged_netmem(struct page_pool *pool, struct netmem *nmem,
 				  unsigned int dma_sync_size,
 				  bool allow_direct);
 
-static inline void page_pool_fragment_page(struct page *page, long nr)
+static inline void page_pool_fragment_netmem(struct netmem *nmem, long nr)
 {
-	atomic_long_set(&page->pp_frag_count, nr);
+	atomic_long_set(&nmem->pp_frag_count, nr);
 }
 
 static inline long page_pool_defrag_netmem(struct netmem *nmem, long nr)
@@ -425,11 +425,6 @@ static inline long page_pool_defrag_netmem(struct netmem *nmem, long nr)
 	return ret;
 }
 
-static inline long page_pool_defrag_page(struct page *page, long nr)
-{
-	return page_pool_defrag_netmem(page_netmem(page), nr);
-}
-
 static inline bool page_pool_is_last_frag(struct page_pool *pool,
 					  struct netmem *nmem)
 {
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ddf9f2bb85f7..5624cdae1f4e 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -222,12 +222,6 @@ EXPORT_SYMBOL(page_pool_create);
 
 static void page_pool_return_netmem(struct page_pool *pool, struct netmem *nm);
 
-static inline
-void page_pool_return_page(struct page_pool *pool, struct page *page)
-{
-	page_pool_return_netmem(pool, page_netmem(page));
-}
-
 noinline
 static struct netmem *page_pool_refill_alloc_cache(struct page_pool *pool)
 {
@@ -665,10 +659,9 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 }
 EXPORT_SYMBOL(page_pool_put_page_bulk);
 
-static struct page *page_pool_drain_frag(struct page_pool *pool,
-					 struct page *page)
+static struct netmem *page_pool_drain_frag(struct page_pool *pool,
+					 struct netmem *nmem)
 {
-	struct netmem *nmem = page_netmem(page);
 	long drain_count = BIAS_MAX - pool->frag_users;
 
 	/* Some user is still using the page frag */
@@ -679,7 +672,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
 		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 			page_pool_dma_sync_for_device(pool, nmem, -1);
 
-		return page;
+		return nmem;
 	}
 
 	page_pool_return_netmem(pool, nmem);
@@ -689,22 +682,22 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
 static void page_pool_free_frag(struct page_pool *pool)
 {
 	long drain_count = BIAS_MAX - pool->frag_users;
-	struct page *page = pool->frag_page;
+	struct netmem *nmem = pool->frag_nmem;
 
-	pool->frag_page = NULL;
+	pool->frag_nmem = NULL;
 
-	if (!page || page_pool_defrag_page(page, drain_count))
+	if (!nmem || page_pool_defrag_netmem(nmem, drain_count))
 		return;
 
-	page_pool_return_page(pool, page);
+	page_pool_return_netmem(pool, nmem);
 }
 
-struct page *page_pool_alloc_frag(struct page_pool *pool,
+struct netmem *page_pool_alloc_frag(struct page_pool *pool,
 				  unsigned int *offset,
 				  unsigned int size, gfp_t gfp)
 {
 	unsigned int max_size = PAGE_SIZE << pool->p.order;
-	struct page *page = pool->frag_page;
+	struct netmem *nmem = pool->frag_nmem;
 
 	if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
 		    size > max_size))
@@ -713,35 +706,35 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
 	size = ALIGN(size, dma_get_cache_alignment());
 	*offset = pool->frag_offset;
 
-	if (page && *offset + size > max_size) {
-		page = page_pool_drain_frag(pool, page);
-		if (page) {
+	if (nmem && *offset + size > max_size) {
+		nmem = page_pool_drain_frag(pool, nmem);
+		if (nmem) {
 			alloc_stat_inc(pool, fast);
 			goto frag_reset;
 		}
 	}
 
-	if (!page) {
-		page = page_pool_alloc_pages(pool, gfp);
-		if (unlikely(!page)) {
-			pool->frag_page = NULL;
+	if (!nmem) {
+		nmem = page_pool_alloc_netmem(pool, gfp);
+		if (unlikely(!nmem)) {
+			pool->frag_nmem = NULL;
 			return NULL;
 		}
 
-		pool->frag_page = page;
+		pool->frag_nmem = nmem;
 
 frag_reset:
 		pool->frag_users = 1;
 		*offset = 0;
 		pool->frag_offset = size;
-		page_pool_fragment_page(page, BIAS_MAX);
-		return page;
+		page_pool_fragment_netmem(nmem, BIAS_MAX);
+		return nmem;
 	}
 
 	pool->frag_users++;
 	pool->frag_offset = *offset + size;
 	alloc_stat_inc(pool, fast);
-	return page;
+	return nmem;
 }
 EXPORT_SYMBOL(page_pool_alloc_frag);
 
-- 
2.35.1


  parent reply	other threads:[~2023-01-05 21:47 UTC|newest]

Thread overview: 84+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-01-05 21:46 [PATCH v2 00/24] Split netmem from struct page Matthew Wilcox (Oracle)
2023-01-05 21:46 ` [PATCH v2 01/24] netmem: Create new type Matthew Wilcox (Oracle)
2023-01-06 13:07   ` Jesper Dangaard Brouer
2023-01-09 17:20   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 02/24] netmem: Add utility functions Matthew Wilcox (Oracle)
2023-01-06  2:24   ` kernel test robot
2023-01-06 20:35     ` Matthew Wilcox
2023-01-06 13:35   ` Jesper Dangaard Brouer
2023-01-05 21:46 ` [PATCH v2 03/24] page_pool: Add netmem_set_dma_addr() and netmem_get_dma_addr() Matthew Wilcox (Oracle)
2023-01-06 13:43   ` Jesper Dangaard Brouer
2023-01-09 17:30   ` Ilias Apalodimas
2023-01-10  9:17     ` Ilias Apalodimas
2023-01-10 18:16       ` Matthew Wilcox
2023-01-10 18:15     ` Matthew Wilcox
2023-01-05 21:46 ` [PATCH v2 04/24] page_pool: Convert page_pool_release_page() to page_pool_release_netmem() Matthew Wilcox (Oracle)
2023-01-06 13:46   ` Jesper Dangaard Brouer
2023-01-10  9:28   ` Ilias Apalodimas
2023-01-10 18:47     ` Matthew Wilcox
2023-01-11 13:56       ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 05/24] page_pool: Start using netmem in allocation path Matthew Wilcox (Oracle)
2023-01-06  2:34   ` kernel test robot
2023-01-06 13:59   ` Jesper Dangaard Brouer
2023-01-06 15:36     ` Matthew Wilcox
2023-01-10  9:30   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 06/24] page_pool: Convert page_pool_return_page() to page_pool_return_netmem() Matthew Wilcox (Oracle)
2023-01-06 14:10   ` Jesper Dangaard Brouer
2023-01-10  9:39   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 07/24] page_pool: Convert __page_pool_put_page() to __page_pool_put_netmem() Matthew Wilcox (Oracle)
2023-01-06 14:14   ` Jesper Dangaard Brouer
2023-01-10  9:47   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 08/24] page_pool: Convert pp_alloc_cache to contain netmem Matthew Wilcox (Oracle)
2023-01-06 14:18   ` Jesper Dangaard Brouer
2023-01-10  9:58   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 09/24] page_pool: Convert page_pool_defrag_page() to page_pool_defrag_netmem() Matthew Wilcox (Oracle)
2023-01-06 14:29   ` Jesper Dangaard Brouer
2023-01-10 10:27   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 10/24] page_pool: Convert page_pool_put_defragged_page() to netmem Matthew Wilcox (Oracle)
2023-01-06 14:32   ` Jesper Dangaard Brouer
2023-01-10 10:36   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 11/24] page_pool: Convert page_pool_empty_ring() to use netmem Matthew Wilcox (Oracle)
2023-01-06 15:22   ` Jesper Dangaard Brouer
2023-01-10 10:38   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 12/24] page_pool: Convert page_pool_alloc_pages() to page_pool_alloc_netmem() Matthew Wilcox (Oracle)
2023-01-06 15:27   ` Jesper Dangaard Brouer
2023-01-10 10:45   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 13/24] page_pool: Convert page_pool_dma_sync_for_device() to take a netmem Matthew Wilcox (Oracle)
2023-01-06 15:28   ` Jesper Dangaard Brouer
2023-01-10 10:47   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 14/24] page_pool: Convert page_pool_recycle_in_cache() to netmem Matthew Wilcox (Oracle)
2023-01-06 15:29   ` Jesper Dangaard Brouer
2023-01-10 10:48   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 15/24] page_pool: Remove page_pool_defrag_page() Matthew Wilcox (Oracle)
2023-01-06 15:29   ` Jesper Dangaard Brouer
2023-01-10  9:47   ` Ilias Apalodimas
2023-01-10 22:00     ` Matthew Wilcox
2023-01-11 13:58       ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 16/24] page_pool: Use netmem in page_pool_drain_frag() Matthew Wilcox (Oracle)
2023-01-06 15:30   ` Jesper Dangaard Brouer
2023-01-10 11:00   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 17/24] page_pool: Convert page_pool_return_skb_page() to use netmem Matthew Wilcox (Oracle)
2023-01-06 15:49   ` Jesper Dangaard Brouer
2023-01-06 16:53     ` Matthew Wilcox
2023-01-06 20:16       ` Jesper Dangaard Brouer
2023-01-09 18:36         ` Matthew Wilcox
2023-01-10 10:04           ` Jesper Dangaard Brouer
2023-01-05 21:46 ` Matthew Wilcox (Oracle) [this message]
2023-01-06 15:51   ` [PATCH v2 18/24] page_pool: Convert frag_page to frag_nmem Jesper Dangaard Brouer
2023-01-10 11:36   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 19/24] xdp: Convert to netmem Matthew Wilcox (Oracle)
2023-01-06 15:53   ` Jesper Dangaard Brouer
2023-01-10 11:50   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 20/24] mm: Remove page pool members from struct page Matthew Wilcox (Oracle)
2023-01-06 15:56   ` Jesper Dangaard Brouer
2023-01-10 11:51   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 21/24] page_pool: Pass a netmem to init_callback() Matthew Wilcox (Oracle)
2023-01-06 16:02   ` Jesper Dangaard Brouer
2023-01-10 11:32   ` Ilias Apalodimas
2023-01-05 21:46 ` [PATCH v2 22/24] net: Add support for netmem in skb_frag Matthew Wilcox (Oracle)
2023-01-05 21:46 ` [PATCH v2 23/24] mvneta: Convert to netmem Matthew Wilcox (Oracle)
2023-01-05 21:46 ` [PATCH v2 24/24] mlx5: " Matthew Wilcox (Oracle)
2023-01-06 16:31   ` Jesper Dangaard Brouer
2023-01-09 11:46     ` Tariq Toukan
2023-01-09 12:27   ` Tariq Toukan
2023-01-06  1:20 ` [PATCH v2 00/24] Split netmem from struct page Jesse Brandeburg

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230105214631.3939268-19-willy@infradead.org \
    --to=willy@infradead.org \
    --cc=hawk@kernel.org \
    --cc=ilias.apalodimas@linaro.org \
    --cc=linux-mm@kvack.org \
    --cc=netdev@vger.kernel.org \
    --cc=shakeelb@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.