From: Ilias Apalodimas <ilias.apalodimas@linaro.org>
To: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Jesper Dangaard Brouer <hawk@kernel.org>,
netdev@vger.kernel.org, linux-mm@kvack.org,
Shakeel Butt <shakeelb@google.com>
Subject: Re: [PATCH v2 08/24] page_pool: Convert pp_alloc_cache to contain netmem
Date: Tue, 10 Jan 2023 11:58:03 +0200
Message-ID: <Y702q7mSaunHCyhS@hera>
In-Reply-To: <20230105214631.3939268-9-willy@infradead.org>
On Thu, Jan 05, 2023 at 09:46:15PM +0000, Matthew Wilcox (Oracle) wrote:
> Change the type of the pp_alloc_cache array from page to netmem. It
> works out well to convert page_pool_refill_alloc_cache() to return a
> netmem instead of a page as part of this commit.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> include/net/page_pool.h | 2 +-
> net/core/page_pool.c | 52 ++++++++++++++++++++---------------------
> 2 files changed, 27 insertions(+), 27 deletions(-)
>
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index 480baa22bc50..63aa530922de 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -173,7 +173,7 @@ static inline bool netmem_is_pfmemalloc(const struct netmem *nmem)
> #define PP_ALLOC_CACHE_REFILL 64
> struct pp_alloc_cache {
> u32 count;
> - struct page *cache[PP_ALLOC_CACHE_SIZE];
> + struct netmem *cache[PP_ALLOC_CACHE_SIZE];
> };
>
> struct page_pool_params {
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 8f3f7cc5a2d5..c54217ce6b77 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -229,10 +229,10 @@ void page_pool_return_page(struct page_pool *pool, struct page *page)
> }
>
> noinline
> -static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
> +static struct netmem *page_pool_refill_alloc_cache(struct page_pool *pool)
> {
> struct ptr_ring *r = &pool->ring;
> - struct page *page;
> + struct netmem *nmem;
> int pref_nid; /* preferred NUMA node */
>
> /* Quicker fallback, avoid locks when ring is empty */
> @@ -253,49 +253,49 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
>
> /* Refill alloc array, but only if NUMA match */
> do {
> - page = __ptr_ring_consume(r);
> - if (unlikely(!page))
> + nmem = __ptr_ring_consume(r);
> + if (unlikely(!nmem))
> break;
>
> - if (likely(page_to_nid(page) == pref_nid)) {
> - pool->alloc.cache[pool->alloc.count++] = page;
> + if (likely(netmem_nid(nmem) == pref_nid)) {
> + pool->alloc.cache[pool->alloc.count++] = nmem;
> } else {
> /* NUMA mismatch;
> 			 * (1) release 1 page to the page allocator and
> 			 * (2) break out and fall through to alloc_pages_node().
> 			 * This limits stress on the page buddy allocator.
> */
> - page_pool_return_page(pool, page);
> + page_pool_return_netmem(pool, nmem);
> alloc_stat_inc(pool, waive);
> - page = NULL;
> + nmem = NULL;
> break;
> }
> } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
>
> /* Return last page */
> if (likely(pool->alloc.count > 0)) {
> - page = pool->alloc.cache[--pool->alloc.count];
> + nmem = pool->alloc.cache[--pool->alloc.count];
> alloc_stat_inc(pool, refill);
> }
>
> - return page;
> + return nmem;
> }
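
The loop conversion looks right to me. For my own understanding I'm
assuming the netmem_nid() helper from patch 02 is just a thin wrapper
over page_to_nid(), i.e. something like:

	/* my reading of patch 02 (from memory, not checked against the tree) */
	static inline int netmem_nid(const struct netmem *nmem)
	{
		return page_to_nid(netmem_page(nmem));
	}

so the NUMA matching semantics are unchanged by this patch.
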
>
> /* fast path */
> static struct page *__page_pool_get_cached(struct page_pool *pool)
> {
> - struct page *page;
> + struct netmem *nmem;
>
> /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
> if (likely(pool->alloc.count)) {
> /* Fast-path */
> - page = pool->alloc.cache[--pool->alloc.count];
> + nmem = pool->alloc.cache[--pool->alloc.count];
> alloc_stat_inc(pool, fast);
> } else {
> - page = page_pool_refill_alloc_cache(pool);
> + nmem = page_pool_refill_alloc_cache(pool);
> }
>
> - return page;
> + return netmem_page(nmem);
> }
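
One thing worth spelling out: page_pool_refill_alloc_cache() can
return NULL, so netmem_page() has to tolerate a NULL argument here.
Going from memory of patch 02, the converters are plain casts,
something along the lines of:

	/* sketch of the converters as I understand patch 02; not verified */
	static inline struct page *netmem_page(const struct netmem *nmem)
	{
		return (struct page *)nmem;
	}

	static inline struct netmem *page_netmem(struct page *page)
	{
		return (struct netmem *)page;
	}

in which case NULL converts to NULL and this return is fine. If
netmem_page() ever grows a real lookup, this call site will need a
NULL check first.
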
>
> static void page_pool_dma_sync_for_device(struct page_pool *pool,
> @@ -391,13 +391,13 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>
> /* Unnecessary as alloc cache is empty, but guarantees zero count */
> if (unlikely(pool->alloc.count > 0))
> - return pool->alloc.cache[--pool->alloc.count];
> + return netmem_page(pool->alloc.cache[--pool->alloc.count]);
>
> /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
> memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
>
> nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
> - pool->alloc.cache);
> + (struct page **)pool->alloc.cache);
> if (unlikely(!nr_pages))
> return NULL;
>
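
The (struct page **) cast for alloc_pages_bulk_array_node() is only
safe because struct netmem is an exact overlay of struct page. I
assume patch 01 already enforces that at compile time; if it doesn't,
a cheap guard along these lines would document the dependency:

	/* hypothetical compile-time check; patch 01 may already have one */
	static_assert(sizeof(struct netmem) == sizeof(struct page));
	static_assert(__alignof__(struct netmem) == __alignof__(struct page));

Not a blocker either way.
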
> @@ -405,7 +405,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
> 	 * page elements have not been (possibly) DMA mapped.
> */
> for (i = 0; i < nr_pages; i++) {
> - struct netmem *nmem = page_netmem(pool->alloc.cache[i]);
> + struct netmem *nmem = pool->alloc.cache[i];
> if ((pp_flags & PP_FLAG_DMA_MAP) &&
> unlikely(!page_pool_dma_map(pool, nmem))) {
> netmem_put(nmem);
> @@ -413,7 +413,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
> }
>
> page_pool_set_pp_info(pool, nmem);
> - pool->alloc.cache[pool->alloc.count++] = netmem_page(nmem);
> + pool->alloc.cache[pool->alloc.count++] = nmem;
> /* Track how many pages are held 'in-flight' */
> pool->pages_state_hold_cnt++;
> trace_page_pool_state_hold(pool, nmem,
> @@ -422,7 +422,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
>
> /* Return last page */
> if (likely(pool->alloc.count > 0)) {
> - page = pool->alloc.cache[--pool->alloc.count];
> + page = netmem_page(pool->alloc.cache[--pool->alloc.count]);
> alloc_stat_inc(pool, slow);
> } else {
> page = NULL;
> @@ -547,7 +547,7 @@ static bool page_pool_recycle_in_cache(struct page *page,
> }
>
> /* Caller MUST have verified/know (page_ref_count(page) == 1) */
> - pool->alloc.cache[pool->alloc.count++] = page;
> + pool->alloc.cache[pool->alloc.count++] = page_netmem(page);
> recycle_stat_inc(pool, cached);
> return true;
> }
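
The page_netmem() wrapping here is just bridging until the caller is
converted. If I follow the series correctly, once patch 14
("page_pool: Convert page_pool_recycle_in_cache() to netmem") lands,
this becomes the plain:

	pool->alloc.cache[pool->alloc.count++] = nmem;

matching the allocation path above.
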
> @@ -785,7 +785,7 @@ static void page_pool_free(struct page_pool *pool)
>
> static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
> {
> - struct page *page;
> + struct netmem *nmem;
>
> if (pool->destroy_cnt)
> return;
> @@ -795,8 +795,8 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
> * call concurrently.
> */
> while (pool->alloc.count) {
> - page = pool->alloc.cache[--pool->alloc.count];
> - page_pool_return_page(pool, page);
> + nmem = pool->alloc.cache[--pool->alloc.count];
> + page_pool_return_netmem(pool, nmem);
> }
> }
>
> @@ -878,15 +878,15 @@ EXPORT_SYMBOL(page_pool_destroy);
> /* Caller must provide appropriate safe context, e.g. NAPI. */
> void page_pool_update_nid(struct page_pool *pool, int new_nid)
> {
> - struct page *page;
> + struct netmem *nmem;
>
> trace_page_pool_update_nid(pool, new_nid);
> pool->p.nid = new_nid;
>
> /* Flush pool alloc cache, as refill will check NUMA node */
> while (pool->alloc.count) {
> - page = pool->alloc.cache[--pool->alloc.count];
> - page_pool_return_page(pool, page);
> + nmem = pool->alloc.cache[--pool->alloc.count];
> + page_pool_return_netmem(pool, nmem);
> }
> }
> EXPORT_SYMBOL(page_pool_update_nid);
> --
> 2.35.1
>
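
One small, non-blocking thought: page_pool_empty_alloc_cache_once()
and page_pool_update_nid() now drain the alloc cache with the exact
same loop. At some point it might be worth factoring that into a tiny
helper, e.g. (untested sketch, name made up):

	static void page_pool_flush_alloc_cache(struct page_pool *pool)
	{
		struct netmem *nmem;

		while (pool->alloc.count) {
			nmem = pool->alloc.cache[--pool->alloc.count];
			page_pool_return_netmem(pool, nmem);
		}
	}

In any case, the conversion itself looks correct.
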
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>