netdev.vger.kernel.org archive mirror
From: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
To: davem@davemloft.net, grygorii.strashko@ti.com, hawk@kernel.org,
	brouer@redhat.com, saeedm@mellanox.com, leon@kernel.org
Cc: ast@kernel.org, linux-kernel@vger.kernel.org,
	linux-omap@vger.kernel.org, xdp-newbies@vger.kernel.org,
	ilias.apalodimas@linaro.org, netdev@vger.kernel.org,
	daniel@iogearbox.net, jakub.kicinski@netronome.com,
	john.fastabend@gmail.com,
	Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
Subject: [PATCH v4 net-next 1/4] net: core: page_pool: add user cnt preventing pool deletion
Date: Tue, 25 Jun 2019 20:59:45 +0300
Message-ID: <20190625175948.24771-2-ivan.khoronzhuk@linaro.org> (raw)
In-Reply-To: <20190625175948.24771-1-ivan.khoronzhuk@linaro.org>

Add a user counter so that the pool is deleted only when it has no
users. The counter does not prevent the pool from being flushed; it
only prevents freeing the pool instance itself. This helps in cases
where the pool must not be deleted implicitly: it becomes the user's
responsibility to free it by calling page_pool_free() as part of the
destroy procedure. It also makes the page_pool_free() call explicit,
instead of being fully hidden inside xdp unregistration, which pairs
more naturally with the page pool "create" routine.
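
For illustration, here is a minimal driver-side sketch of the intended
reference lifetime (the drv_* names, fields and sizes are made up for
this example and are not part of the patch): page_pool_create() takes
the first user reference, xdp_rxq_info_reg_mem_model() takes a second
one via page_pool_get(), and each of xdp_rxq_info_unreg() and the
driver's explicit page_pool_free() drops one.

	/* Hypothetical driver; names and sizes are illustrative only.
	 * xdp_rxq_info_reg() for rq->xdp_rxq is assumed to have been
	 * done earlier in the setup path.
	 */
	static int drv_alloc_rq(struct drv_rq *rq, struct device *dev)
	{
		struct page_pool_params pp_params = { 0 };
		int err;

		pp_params.order = 0;
		pp_params.flags = 0;
		pp_params.pool_size = 1024;
		pp_params.nid = NUMA_NO_NODE;
		pp_params.dev = dev;
		pp_params.dma_dir = DMA_FROM_DEVICE;

		/* takes the first user reference (page_pool_get() in create) */
		rq->page_pool = page_pool_create(&pp_params);
		if (IS_ERR(rq->page_pool)) {
			err = PTR_ERR(rq->page_pool);
			rq->page_pool = NULL;
			return err;
		}

		/* takes a second user reference for the xdp mem model */
		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
						 MEM_TYPE_PAGE_POOL, rq->page_pool);
		if (err)
			goto err_free_pool;

		return 0;

	err_free_pool:
		/* only the create() reference is held here, so the pool is freed */
		page_pool_free(rq->page_pool);
		return err;
	}

	static void drv_free_rq(struct drv_rq *rq)
	{
		/* xdp mem model teardown drops its own reference */
		xdp_rxq_info_unreg(&rq->xdp_rxq);
		/* drops the last reference; the pool instance is freed now */
		page_pool_free(rq->page_pool);
	}

The mlx5 changes below follow the same pattern in mlx5e_alloc_rq() and
mlx5e_free_rq().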

Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
---
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 +++++---
 include/net/page_pool.h                           | 7 +++++++
 net/core/page_pool.c                              | 7 +++++++
 net/core/xdp.c                                    | 3 +++
 4 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5e40db8f92e6..cb028de64a1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -545,10 +545,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	}
 	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
 					 MEM_TYPE_PAGE_POOL, rq->page_pool);
-	if (err) {
-		page_pool_free(rq->page_pool);
+	if (err)
 		goto err_free;
-	}
 
 	for (i = 0; i < wq_sz; i++) {
 		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -613,6 +611,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	if (rq->xdp_prog)
 		bpf_prog_put(rq->xdp_prog);
 	xdp_rxq_info_unreg(&rq->xdp_rxq);
+	if (rq->page_pool)
+		page_pool_free(rq->page_pool);
 	mlx5_wq_destroy(&rq->wq_ctrl);
 
 	return err;
@@ -643,6 +643,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 	}
 
 	xdp_rxq_info_unreg(&rq->xdp_rxq);
+	if (rq->page_pool)
+		page_pool_free(rq->page_pool);
 	mlx5_wq_destroy(&rq->wq_ctrl);
 }
 
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index f07c518ef8a5..1ec838e9927e 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -101,6 +101,7 @@ struct page_pool {
 	struct ptr_ring ring;
 
 	atomic_t pages_state_release_cnt;
+	atomic_t user_cnt;
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -183,6 +184,12 @@ static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 	return page->dma_addr;
 }
 
+/* used to prevent pool from deallocation */
+static inline void page_pool_get(struct page_pool *pool)
+{
+	atomic_inc(&pool->user_cnt);
+}
+
 static inline bool is_page_pool_compiled_in(void)
 {
 #ifdef CONFIG_PAGE_POOL
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index b366f59885c1..169b0e3c870e 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -48,6 +48,7 @@ static int page_pool_init(struct page_pool *pool,
 		return -ENOMEM;
 
 	atomic_set(&pool->pages_state_release_cnt, 0);
+	atomic_set(&pool->user_cnt, 0);
 
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		get_device(pool->p.dev);
@@ -70,6 +71,8 @@ struct page_pool *page_pool_create(const struct page_pool_params *params)
 		kfree(pool);
 		return ERR_PTR(err);
 	}
+
+	page_pool_get(pool);
 	return pool;
 }
 EXPORT_SYMBOL(page_pool_create);
@@ -356,6 +359,10 @@ static void __warn_in_flight(struct page_pool *pool)
 
 void __page_pool_free(struct page_pool *pool)
 {
+	/* free only if no users */
+	if (!atomic_dec_and_test(&pool->user_cnt))
+		return;
+
 	WARN(pool->alloc.count, "API usage violation");
 	WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
 
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 829377cc83db..04bdcd784d2e 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -372,6 +372,9 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
 
 	mutex_unlock(&mem_id_lock);
 
+	if (type == MEM_TYPE_PAGE_POOL)
+		page_pool_get(xdp_alloc->page_pool);
+
 	trace_mem_connect(xdp_alloc, xdp_rxq);
 	return 0;
 err:
-- 
2.17.1


Thread overview: 17+ messages
2019-06-25 17:59 [PATCH v4 net-next 0/4] net: ethernet: ti: cpsw: Add XDP support Ivan Khoronzhuk
2019-06-25 17:59 ` Ivan Khoronzhuk [this message]
2019-06-26  1:36   ` [PATCH v4 net-next 1/4] net: core: page_pool: add user cnt preventing pool deletion Willem de Bruijn
2019-06-26 14:01     ` Ivan Khoronzhuk
2019-06-26 10:42   ` Jesper Dangaard Brouer
2019-06-26 10:49     ` Ivan Khoronzhuk
2019-06-26 11:51       ` Jesper Dangaard Brouer
2019-06-26 12:39         ` Ivan Khoronzhuk
2019-06-27 19:44   ` Jesper Dangaard Brouer
2019-06-27 22:02     ` Ivan Khoronzhuk
2019-06-28  6:35       ` Jesper Dangaard Brouer
2019-06-28  8:53         ` Ivan Khoronzhuk
2019-06-25 17:59 ` [PATCH v4 net-next 2/4] net: ethernet: ti: davinci_cpdma: add dma mapped submit Ivan Khoronzhuk
2019-06-25 17:59 ` [PATCH v4 net-next 3/4] net: ethernet: ti: davinci_cpdma: return handler status Ivan Khoronzhuk
2019-06-26  2:17   ` Willem de Bruijn
2019-06-25 17:59 ` [PATCH v4 net-next 4/4] net: ethernet: ti: cpsw: add XDP support Ivan Khoronzhuk
2019-06-25 20:46 ` [PATCH v4 net-next 0/4] net: ethernet: ti: cpsw: Add " David Miller
