From: "Björn Töpel" <bjorn.topel@intel.com>
To: Magnus Karlsson <magnus.karlsson@intel.com>,
	ast@kernel.org, daniel@iogearbox.net, netdev@vger.kernel.org,
	jonathan.lemon@gmail.com, maximmi@mellanox.com
Cc: bpf@vger.kernel.org, jeffrey.t.kirsher@intel.com,
	anthony.l.nguyen@intel.com, maciej.fijalkowski@intel.com,
	maciejromanfijalkowski@gmail.com, cristian.dumitrescu@intel.com
Subject: Re: [PATCH bpf-next v4 06/14] xsk: move xsk_tx_list and its lock to buffer pool
Date: Tue, 28 Jul 2020 09:10:42 +0200
Message-ID: <319507fc-eef9-11bb-5e79-7578ce21bd6f@intel.com>
In-Reply-To: <1595307848-20719-7-git-send-email-magnus.karlsson@intel.com>



On 2020-07-21 07:04, Magnus Karlsson wrote:
> Move the xsk_tx_list and the xsk_tx_list_lock from the umem to
> the buffer pool. This is so that a later commit can share the
> umem between multiple HW queues. There is one xsk_tx_list per
> device and queue id, so it should be located in the buffer pool.
> 
> Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>

Acked-by: Björn Töpel <bjorn.topel@intel.com>
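
For anyone following the series: with the list moved into the pool, a
zero-copy driver's TX path only ever needs the buffer pool. A rough
sketch of such a loop is below; only xsk_tx_peek_desc() and
xsk_tx_release() are taken from this series, while struct my_tx_ring
and the my_drv_* names are made up purely for illustration.

#include <net/xdp_sock_drv.h>	/* driver-facing AF_XDP helpers */

/* Hypothetical driver TX step. xsk_tx_peek_desc() walks the pool's
 * xsk_tx_list under rcu_read_lock() internally, and xsk_tx_release()
 * releases the consumed descriptors and wakes the sockets up again.
 */
static void my_drv_xsk_tx(struct my_tx_ring *ring,
			  struct xsk_buff_pool *pool, int budget)
{
	struct xdp_desc desc;

	while (budget-- > 0 && xsk_tx_peek_desc(pool, &desc)) {
		/* desc.addr/desc.len describe a frame in the umem;
		 * post it on the HW TX ring here.
		 */
		my_drv_post_frame(ring, desc.addr, desc.len);
	}

	xsk_tx_release(pool);
}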

> ---
>   include/net/xdp_sock.h      |  4 +---
>   include/net/xsk_buff_pool.h |  5 +++++
>   net/xdp/xdp_umem.c          | 26 --------------------------
>   net/xdp/xdp_umem.h          |  2 --
>   net/xdp/xsk.c               | 13 ++++++-------
>   net/xdp/xsk_buff_pool.c     | 26 ++++++++++++++++++++++++++
>   6 files changed, 38 insertions(+), 38 deletions(-)
> 
> diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
> index b052f1c..9a61d05 100644
> --- a/include/net/xdp_sock.h
> +++ b/include/net/xdp_sock.h
> @@ -29,8 +29,6 @@ struct xdp_umem {
>   	u8 flags;
>   	int id;
>   	bool zc;
> -	spinlock_t xsk_tx_list_lock;
> -	struct list_head xsk_tx_list;
>   };
>   
>   struct xsk_map {
> @@ -57,7 +55,7 @@ struct xdp_sock {
>   	/* Protects multiple processes in the control path */
>   	struct mutex mutex;
>   	struct xsk_queue *tx ____cacheline_aligned_in_smp;
> -	struct list_head list;
> +	struct list_head tx_list;
>   	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
>   	 * in the SKB destructor callback.
>   	 */
> diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
> index 2d94890..83f100c 100644
> --- a/include/net/xsk_buff_pool.h
> +++ b/include/net/xsk_buff_pool.h
> @@ -52,6 +52,9 @@ struct xsk_buff_pool {
>   	void *addrs;
>   	struct device *dev;
>   	struct net_device *netdev;
> +	struct list_head xsk_tx_list;
> +	/* Protects modifications to the xsk_tx_list */
> +	spinlock_t xsk_tx_list_lock;
>   	refcount_t users;
>   	struct work_struct work;
>   	struct xdp_buff_xsk *free_heads[];
> @@ -67,6 +70,8 @@ void xp_release(struct xdp_buff_xsk *xskb);
>   void xp_get_pool(struct xsk_buff_pool *pool);
>   void xp_put_pool(struct xsk_buff_pool *pool);
>   void xp_clear_dev(struct xsk_buff_pool *pool);
> +void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
> +void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
>   
>   /* AF_XDP, and XDP core. */
>   void xp_free(struct xdp_buff_xsk *xskb);
> diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
> index b1699d0..a871c75 100644
> --- a/net/xdp/xdp_umem.c
> +++ b/net/xdp/xdp_umem.c
> @@ -23,30 +23,6 @@
>   
>   static DEFINE_IDA(umem_ida);
>   
> -void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
> -{
> -	unsigned long flags;
> -
> -	if (!xs->tx)
> -		return;
> -
> -	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
> -	list_add_rcu(&xs->list, &umem->xsk_tx_list);
> -	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
> -}
> -
> -void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
> -{
> -	unsigned long flags;
> -
> -	if (!xs->tx)
> -		return;
> -
> -	spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
> -	list_del_rcu(&xs->list);
> -	spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
> -}
> -
>   static void xdp_umem_unpin_pages(struct xdp_umem *umem)
>   {
>   	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
> @@ -206,8 +182,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
>   	umem->pgs = NULL;
>   	umem->user = NULL;
>   	umem->flags = mr->flags;
> -	INIT_LIST_HEAD(&umem->xsk_tx_list);
> -	spin_lock_init(&umem->xsk_tx_list_lock);
>   
>   	refcount_set(&umem->users, 1);
>   
> diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
> index 67bf3f3..181fdda 100644
> --- a/net/xdp/xdp_umem.h
> +++ b/net/xdp/xdp_umem.h
> @@ -10,8 +10,6 @@
>   
>   void xdp_get_umem(struct xdp_umem *umem);
>   void xdp_put_umem(struct xdp_umem *umem);
> -void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
> -void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
>   struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);
>   
>   #endif /* XDP_UMEM_H_ */
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 624d0fc..d0ff5e8 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -57,7 +57,7 @@ void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
>   		return;
>   
>   	rcu_read_lock();
> -	list_for_each_entry_rcu(xs, &xs->umem->xsk_tx_list, list) {
> +	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
>   		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
>   	}
>   	rcu_read_unlock();
> @@ -84,7 +84,7 @@ void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
>   		return;
>   
>   	rcu_read_lock();
> -	list_for_each_entry_rcu(xs, &xs->umem->xsk_tx_list, list) {
> +	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
>   		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
>   	}
>   	rcu_read_unlock();
> @@ -300,7 +300,7 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
>   	struct xdp_sock *xs;
>   
>   	rcu_read_lock();
> -	list_for_each_entry_rcu(xs, &pool->umem->xsk_tx_list, list) {
> +	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
>   		__xskq_cons_release(xs->tx);
>   		xs->sk.sk_write_space(&xs->sk);
>   	}
> @@ -310,11 +310,10 @@ EXPORT_SYMBOL(xsk_tx_release);
>   
>   bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
>   {
> -	struct xdp_umem *umem = pool->umem;
>   	struct xdp_sock *xs;
>   
>   	rcu_read_lock();
> -	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
> +	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
>   		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
>   			xs->tx->queue_empty_descs++;
>   			continue;
> @@ -522,7 +521,7 @@ static void xsk_unbind_dev(struct xdp_sock *xs)
>   	WRITE_ONCE(xs->state, XSK_UNBOUND);
>   
>   	/* Wait for driver to stop using the xdp socket. */
> -	xdp_del_sk_umem(xs->umem, xs);
> +	xp_del_xsk(xs->pool, xs);
>   	xs->dev = NULL;
>   	synchronize_net();
>   	dev_put(dev);
> @@ -742,7 +741,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
>   	xs->dev = dev;
>   	xs->zc = xs->umem->zc;
>   	xs->queue_id = qid;
> -	xdp_add_sk_umem(xs->umem, xs);
> +	xp_add_xsk(xs->pool, xs);
>   
>   out_unlock:
>   	if (err) {
> diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
> index 436648a..dbd913e 100644
> --- a/net/xdp/xsk_buff_pool.c
> +++ b/net/xdp/xsk_buff_pool.c
> @@ -11,6 +11,30 @@
>   #include "xdp_umem.h"
>   #include "xsk.h"
>   
> +void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
> +{
> +	unsigned long flags;
> +
> +	if (!xs->tx)
> +		return;
> +
> +	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
> +	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
> +	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
> +}
> +
> +void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
> +{
> +	unsigned long flags;
> +
> +	if (!xs->tx)
> +		return;
> +
> +	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
> +	list_del_rcu(&xs->tx_list);
> +	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
> +}
> +
>   static void xp_addr_unmap(struct xsk_buff_pool *pool)
>   {
>   	vunmap(pool->addrs);
> @@ -63,6 +87,8 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
>   		XDP_PACKET_HEADROOM;
>   	pool->umem = umem;
>   	INIT_LIST_HEAD(&pool->free_list);
> +	INIT_LIST_HEAD(&pool->xsk_tx_list);
> +	spin_lock_init(&pool->xsk_tx_list_lock);
>   	refcount_set(&pool->users, 1);
>   
>   	pool->fq = xs->fq_tmp;
> 


Thread overview: 34+ messages
2020-07-21  5:03 [PATCH bpf-next v4 00/14] xsk: support shared umems between devices and queues Magnus Karlsson
2020-07-21  5:03 ` [PATCH bpf-next v4 01/14] xsk: i40e: ice: ixgbe: mlx5: pass buffer pool to driver instead of umem Magnus Karlsson
2020-07-28  7:04   ` Björn Töpel
2020-07-21  5:03 ` [PATCH bpf-next v4 02/14] xsk: i40e: ice: ixgbe: mlx5: rename xsk zero-copy driver interfaces Magnus Karlsson
2020-07-28  7:04   ` Björn Töpel
2020-07-21  5:03 ` [PATCH bpf-next v4 03/14] xsk: create and free buffer pool independently from umem Magnus Karlsson
2020-07-28  7:05   ` Björn Töpel
2020-07-21  5:03 ` [PATCH bpf-next v4 04/14] xsk: move fill and completion rings to buffer pool Magnus Karlsson
2020-07-28  7:05   ` Björn Töpel
2020-07-21  5:03 ` [PATCH bpf-next v4 05/14] xsk: move queue_id, dev and need_wakeup " Magnus Karlsson
2020-07-28  7:09   ` Björn Töpel
2020-07-29 13:20     ` Magnus Karlsson
2020-07-28  9:21   ` Maxim Mikityanskiy
2020-07-29 13:21     ` Magnus Karlsson
2020-07-21  5:04 ` [PATCH bpf-next v4 06/14] xsk: move xsk_tx_list and its lock " Magnus Karlsson
2020-07-28  7:10   ` Björn Töpel [this message]
2020-07-21  5:04 ` [PATCH bpf-next v4 07/14] xsk: move addrs from buffer pool to umem Magnus Karlsson
2020-07-28  7:11   ` Björn Töpel
2020-07-21  5:04 ` [PATCH bpf-next v4 08/14] xsk: enable sharing of dma mappings Magnus Karlsson
2020-07-28  7:14   ` Björn Töpel
2020-07-28  8:59   ` Maxim Mikityanskiy
2020-07-29 13:22     ` Magnus Karlsson
2020-07-21  5:04 ` [PATCH bpf-next v4 09/14] xsk: rearrange internal structs for better performance Magnus Karlsson
2020-07-28  7:14   ` Björn Töpel
2020-07-21  5:04 ` [PATCH bpf-next v4 10/14] xsk: add shared umem support between queue ids Magnus Karlsson
2020-07-28  7:15   ` Björn Töpel
2020-07-21  5:04 ` [PATCH bpf-next v4 11/14] xsk: add shared umem support between devices Magnus Karlsson
2020-07-28  7:15   ` Björn Töpel
2020-07-21  5:04 ` [PATCH bpf-next v4 12/14] libbpf: support shared umems between queues and devices Magnus Karlsson
2020-07-28  7:18   ` Björn Töpel
2020-07-21  5:04 ` [PATCH bpf-next v4 13/14] samples/bpf: add new sample xsk_fwd.c Magnus Karlsson
2020-07-28  7:18   ` Björn Töpel
2020-07-21  5:04 ` [PATCH bpf-next v4 14/14] xsk: documentation for XDP_SHARED_UMEM between queues and netdevs Magnus Karlsson
2020-07-28  7:18   ` Björn Töpel
