From: "Björn Töpel" <bjorn.topel@intel.com>
To: Magnus Karlsson <magnus.karlsson@intel.com>,
	ast@kernel.org, daniel@iogearbox.net, netdev@vger.kernel.org,
	jonathan.lemon@gmail.com, maximmi@mellanox.com
Cc: bpf@vger.kernel.org, jeffrey.t.kirsher@intel.com,
	anthony.l.nguyen@intel.com, maciej.fijalkowski@intel.com,
	maciejromanfijalkowski@gmail.com, cristian.dumitrescu@intel.com
Subject: Re: [PATCH bpf-next v4 09/14] xsk: rearrange internal structs for better performance
Date: Tue, 28 Jul 2020 09:14:54 +0200
Message-ID: <141bf975-52be-325b-7c48-a0c611695e19@intel.com>
In-Reply-To: <1595307848-20719-10-git-send-email-magnus.karlsson@intel.com>



On 2020-07-21 07:04, Magnus Karlsson wrote:
> Rearrange the xdp_sock, xdp_umem and xsk_buff_pool structures so
> that they become smaller and align better with cache lines. In the
> previous commits of this patch set, these structs were reordered
> with a focus on functionality and simplicity, not performance. This
> patch improves throughput by around 3%.
> 
> Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>

Acked-by: Björn Töpel <bjorn.topel@intel.com>
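
As context for readers less familiar with the idiom, here is a minimal
userspace sketch of the layout technique this patch applies (hypothetical
struct and member names, not the actual xsk code). In the kernel,
____cacheline_aligned_in_smp expands to an aligned attribute of
SMP_CACHE_BYTES on SMP builds, forcing the annotated member to start on a
new cache line; grouping cold control-path members before that boundary
and hot data-path members after it keeps the fast path within fewer cache
lines. The sketch below uses C11 alignas with an assumed 64-byte line
size instead:

#include <stdalign.h>
#include <stdint.h>

#define CACHE_LINE 64	/* assumed line size, analogous to SMP_CACHE_BYTES */

/* Hypothetical example pool, illustrating only the cold/hot split. */
struct example_pool {
	/* Control path: written at setup time, rarely touched afterwards. */
	void *owner;
	uint32_t queue_id;
	uint32_t heads_cnt;

	/*
	 * Data path: start on a fresh cache line, mirroring what
	 * ____cacheline_aligned_in_smp does in the patch, so the hot
	 * members do not share a line with the cold ones above.
	 */
	alignas(CACHE_LINE) void *fill_ring;
	void *completion_ring;
	uint64_t chunk_mask;
	uint32_t free_cnt;
};

The resulting layout, including any padding holes, can be inspected with
pahole from the dwarves package (e.g. pahole -C example_pool on an object
built with debug info).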


> ---
>   include/net/xdp_sock.h      | 13 +++++++------
>   include/net/xsk_buff_pool.h | 27 +++++++++++++++------------
>   2 files changed, 22 insertions(+), 18 deletions(-)
> 
> diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
> index 282aeba..1a9559c 100644
> --- a/include/net/xdp_sock.h
> +++ b/include/net/xdp_sock.h
> @@ -23,13 +23,13 @@ struct xdp_umem {
>   	u32 headroom;
>   	u32 chunk_size;
>   	u32 chunks;
> +	u32 npgs;
>   	struct user_struct *user;
>   	refcount_t users;
> -	struct page **pgs;
> -	u32 npgs;
>   	u8 flags;
> -	int id;
>   	bool zc;
> +	struct page **pgs;
> +	int id;
>   	struct list_head xsk_dma_list;
>   };
>   
> @@ -42,7 +42,7 @@ struct xsk_map {
>   struct xdp_sock {
>   	/* struct sock must be the first member of struct xdp_sock */
>   	struct sock sk;
> -	struct xsk_queue *rx;
> +	struct xsk_queue *rx ____cacheline_aligned_in_smp;
>   	struct net_device *dev;
>   	struct xdp_umem *umem;
>   	struct list_head flush_node;
> @@ -54,8 +54,7 @@ struct xdp_sock {
>   		XSK_BOUND,
>   		XSK_UNBOUND,
>   	} state;
> -	/* Protects multiple processes in the control path */
> -	struct mutex mutex;
> +
>   	struct xsk_queue *tx ____cacheline_aligned_in_smp;
>   	struct list_head tx_list;
>   	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
> @@ -72,6 +71,8 @@ struct xdp_sock {
>   	struct list_head map_list;
>   	/* Protects map_list */
>   	spinlock_t map_list_lock;
> +	/* Protects multiple processes in the control path */
> +	struct mutex mutex;
>   	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
>   	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
>   };
> diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
> index 8f1dc4c..b4d6307 100644
> --- a/include/net/xsk_buff_pool.h
> +++ b/include/net/xsk_buff_pool.h
> @@ -36,34 +36,37 @@ struct xsk_dma_map {
>   };
>   
>   struct xsk_buff_pool {
> -	struct xsk_queue *fq;
> -	struct xsk_queue *cq;
> +	/* Members only used in the control path first. */
> +	struct device *dev;
> +	struct net_device *netdev;
> +	struct list_head xsk_tx_list;
> +	/* Protects modifications to the xsk_tx_list */
> +	spinlock_t xsk_tx_list_lock;
> +	refcount_t users;
> +	struct xdp_umem *umem;
> +	struct work_struct work;
>   	struct list_head free_list;
> +	u32 heads_cnt;
> +	u16 queue_id;
> +
> +	/* Data path members as close to free_heads at the end as possible. */
> +	struct xsk_queue *fq ____cacheline_aligned_in_smp;
> +	struct xsk_queue *cq;
>   	dma_addr_t *dma_pages;
>   	struct xdp_buff_xsk *heads;
>   	u64 chunk_mask;
>   	u64 addrs_cnt;
>   	u32 free_list_cnt;
>   	u32 dma_pages_cnt;
> -	u32 heads_cnt;
>   	u32 free_heads_cnt;
>   	u32 headroom;
>   	u32 chunk_size;
>   	u32 frame_len;
> -	u16 queue_id;
>   	u8 cached_need_wakeup;
>   	bool uses_need_wakeup;
>   	bool dma_need_sync;
>   	bool unaligned;
> -	struct xdp_umem *umem;
>   	void *addrs;
> -	struct device *dev;
> -	struct net_device *netdev;
> -	struct list_head xsk_tx_list;
> -	/* Protects modifications to the xsk_tx_list */
> -	spinlock_t xsk_tx_list_lock;
> -	refcount_t users;
> -	struct work_struct work;
>   	struct xdp_buff_xsk *free_heads[];
>   };
>   
> 
