From: Jakub Kicinski <jakub.kicinski@netronome.com>
To: Kevin Laatz <kevin.laatz@intel.com>
Cc: netdev@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net,
bjorn.topel@intel.com, magnus.karlsson@intel.com,
jonathan.lemon@gmail.com, saeedm@mellanox.com,
maximmi@mellanox.com, stephen@networkplumber.org,
bruce.richardson@intel.com, ciara.loftus@intel.com,
bpf@vger.kernel.org, intel-wired-lan@lists.osuosl.org
Subject: Re: [PATCH bpf-next v3 03/11] xsk: add support to allow unaligned chunk placement
Date: Wed, 24 Jul 2019 19:22:53 -0700 [thread overview]
Message-ID: <20190724192253.00ac07bd@cakuba.netronome.com> (raw)
In-Reply-To: <20190724051043.14348-4-kevin.laatz@intel.com>
On Wed, 24 Jul 2019 05:10:35 +0000, Kevin Laatz wrote:
> Currently, addresses are chunk size aligned. This means, we are very
> restricted in terms of where we can place chunk within the umem. For
> example, if we have a chunk size of 2k, then our chunks can only be placed
> at 0,2k,4k,6k,8k... and so on (ie. every 2k starting from 0).
>
> This patch introduces the ability to use unaligned chunks. With these
> changes, we are no longer bound to having to place chunks at a 2k (or
> whatever your chunk size is) interval. Since we are no longer dealing with
> aligned chunks, they can now cross page boundaries. Checks for page
> contiguity have been added in order to keep track of which pages are
> followed by a physically contiguous page.
>
> Signed-off-by: Kevin Laatz <kevin.laatz@intel.com>
> Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
>
> ---
> v2:
> - Add checks for the flags coming from userspace
> - Fix how we get chunk_size in xsk_diag.c
> - Add defines for masking the new descriptor format
> - Modified the rx functions to use new descriptor format
> - Modified the tx functions to use new descriptor format
>
> v3:
> - Add helper function to do address/offset masking/addition
> ---
> include/net/xdp_sock.h | 17 ++++++++
> include/uapi/linux/if_xdp.h | 9 ++++
> net/xdp/xdp_umem.c | 18 +++++---
> net/xdp/xsk.c | 86 ++++++++++++++++++++++++++++++-------
> net/xdp/xsk_diag.c | 2 +-
> net/xdp/xsk_queue.h | 68 +++++++++++++++++++++++++----
> 6 files changed, 170 insertions(+), 30 deletions(-)
>
> diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
> index 69796d264f06..738996c0f995 100644
> --- a/include/net/xdp_sock.h
> +++ b/include/net/xdp_sock.h
> @@ -19,6 +19,7 @@ struct xsk_queue;
> struct xdp_umem_page {
> void *addr;
> dma_addr_t dma;
> + bool next_pg_contig;
IIRC accesses to xdp_umem_page cause a lot of cache misses, so having
this structure grow from 16 to 24B is a little unfortunate :(
Can we try to steal lower bits of addr or dma? Or perhaps not
precompute this info at all?
> };
>
> struct xdp_umem_fq_reuse {
> @@ -48,6 +49,7 @@ struct xdp_umem {
> bool zc;
> spinlock_t xsk_list_lock;
> struct list_head xsk_list;
> + u32 flags;
> };
>
> struct xdp_sock {
> @@ -144,6 +146,15 @@ static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
>
> rq->handles[rq->length++] = addr;
> }
> +
> +static inline u64 xsk_umem_handle_offset(struct xdp_umem *umem, u64 handle,
> + u64 offset)
> +{
> + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNKS)
> + return handle |= (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
> + else
> + return handle += offset;
> +}
> #else
> static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
> {
> @@ -241,6 +252,12 @@ static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
> {
> }
>
> +static inline u64 xsk_umem_handle_offset(struct xdp_umem *umem, u64 handle,
> + u64 offset)
> +{
> + return NULL;
return 0?
> +}
> +
> #endif /* CONFIG_XDP_SOCKETS */
>
> #endif /* _LINUX_XDP_SOCK_H */
> diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
> index faaa5ca2a117..f8dc68fcdf78 100644
> --- a/include/uapi/linux/if_xdp.h
> +++ b/include/uapi/linux/if_xdp.h
> @@ -17,6 +17,9 @@
> #define XDP_COPY (1 << 1) /* Force copy-mode */
> #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */
>
> +/* Flags for xsk_umem_config flags */
> +#define XDP_UMEM_UNALIGNED_CHUNKS (1 << 0)
> +
> struct sockaddr_xdp {
> __u16 sxdp_family;
> __u16 sxdp_flags;
> @@ -53,6 +56,7 @@ struct xdp_umem_reg {
> __u64 len; /* Length of packet data area */
> __u32 chunk_size;
> __u32 headroom;
> + __u32 flags;
> };
>
> struct xdp_statistics {
> @@ -74,6 +78,11 @@ struct xdp_options {
> #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL
> #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
>
> +/* Masks for unaligned chunks mode */
> +#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
> +#define XSK_UNALIGNED_BUF_ADDR_MASK \
> + ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
> +
> /* Rx/Tx descriptor */
> struct xdp_desc {
> __u64 addr;
> diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
> index 83de74ca729a..952ca22103e9 100644
> --- a/net/xdp/xdp_umem.c
> +++ b/net/xdp/xdp_umem.c
> @@ -299,6 +299,7 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
>
> static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
> {
> + bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNKS;
> u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
> unsigned int chunks, chunks_per_page;
> u64 addr = mr->addr, size = mr->len;
> @@ -314,7 +315,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
> return -EINVAL;
> }
>
> - if (!is_power_of_2(chunk_size))
> + if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNKS))
parens unnecessary, consider adding a define for known flags.
> + return -EINVAL;
> +
> + if (!unaligned_chunks && !is_power_of_2(chunk_size))
> return -EINVAL;
>
> if (!PAGE_ALIGNED(addr)) {
> @@ -331,9 +335,11 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
> if (chunks == 0)
> return -EINVAL;
>
> - chunks_per_page = PAGE_SIZE / chunk_size;
> - if (chunks < chunks_per_page || chunks % chunks_per_page)
> - return -EINVAL;
> + if (!unaligned_chunks) {
> + chunks_per_page = PAGE_SIZE / chunk_size;
> + if (chunks < chunks_per_page || chunks % chunks_per_page)
> + return -EINVAL;
> + }
>
> headroom = ALIGN(headroom, 64);
>
> @@ -342,13 +348,15 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
> return -EINVAL;
>
> umem->address = (unsigned long)addr;
> - umem->chunk_mask = ~((u64)chunk_size - 1);
> + umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
> + : ~((u64)chunk_size - 1);
> umem->size = size;
> umem->headroom = headroom;
> umem->chunk_size_nohr = chunk_size - headroom;
> umem->npgs = size / PAGE_SIZE;
> umem->pgs = NULL;
> umem->user = NULL;
> + umem->flags = mr->flags;
> INIT_LIST_HEAD(&umem->xsk_list);
> spin_lock_init(&umem->xsk_list_lock);
>
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 59b57d708697..b3ab653091c4 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -45,7 +45,7 @@ EXPORT_SYMBOL(xsk_umem_has_addrs);
>
> u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
> {
> - return xskq_peek_addr(umem->fq, addr);
> + return xskq_peek_addr(umem->fq, addr, umem);
> }
> EXPORT_SYMBOL(xsk_umem_peek_addr);
>
> @@ -55,21 +55,42 @@ void xsk_umem_discard_addr(struct xdp_umem *umem)
> }
> EXPORT_SYMBOL(xsk_umem_discard_addr);
>
> +/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
> + * each page. This is only required in copy mode.
> + */
> +static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
> + u32 len, u32 metalen)
> +{
> + void *to_buf = xdp_umem_get_data(umem, addr);
> +
> + if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) {
> + void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
> + u64 page_start = addr & (PAGE_SIZE - 1);
> + u64 first_len = PAGE_SIZE - (addr - page_start);
> +
> + memcpy(to_buf, from_buf, first_len + metalen);
> + memcpy(next_pg_addr, from_buf + first_len, len - first_len);
> +
> + return;
> + }
> +
> + memcpy(to_buf, from_buf, len + metalen);
> +}
Why handle this case gracefully? Real XSK use is the zero copy mode,
having extra code to make copy mode more permissive seems a little
counter productive IMHO.
> static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
> {
> - void *to_buf, *from_buf;
> + u64 offset = xs->umem->headroom;
> + void *from_buf;
> u32 metalen;
> u64 addr;
> int err;
>
> - if (!xskq_peek_addr(xs->umem->fq, &addr) ||
> + if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
> len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
> xs->rx_dropped++;
> return -ENOSPC;
> }
>
> - addr += xs->umem->headroom;
> -
> if (unlikely(xdp_data_meta_unsupported(xdp))) {
> from_buf = xdp->data;
> metalen = 0;
> @@ -78,9 +99,13 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
> metalen = xdp->data - xdp->data_meta;
> }
>
> - to_buf = xdp_umem_get_data(xs->umem, addr);
> - memcpy(to_buf, from_buf, len + metalen);
> - addr += metalen;
> + __xsk_rcv_memcpy(xs->umem, addr + offset, from_buf, len, metalen);
> +
> + offset += metalen;
> + if (xs->umem->flags & XDP_UMEM_UNALIGNED_CHUNKS)
> + addr |= offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
> + else
> + addr += offset;
> err = xskq_produce_batch_desc(xs->rx, addr, len);
> if (!err) {
> xskq_discard_addr(xs->umem->fq);
> @@ -127,6 +152,7 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
> u32 len = xdp->data_end - xdp->data;
> void *buffer;
> u64 addr;
> + u64 offset = xs->umem->headroom;
reverse xmas tree, please
> int err;
>
> spin_lock_bh(&xs->rx_lock);
next prev parent reply other threads:[~2019-07-25 2:23 UTC|newest]
Thread overview: 136+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-06-20 9:09 [PATCH 00/11] XDP unaligned chunk placement support Kevin Laatz
2019-06-20 9:09 ` [PATCH 01/11] i40e: simplify Rx buffer recycle Kevin Laatz
2019-06-24 14:29 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 02/11] ixgbe: " Kevin Laatz
2019-06-24 14:30 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 03/11] xdp: add offset param to zero_copy_allocator Kevin Laatz
2019-06-24 14:31 ` Björn Töpel
2019-06-24 19:23 ` Jakub Kicinski
2019-06-25 13:14 ` Laatz, Kevin
2019-06-20 9:09 ` [PATCH 04/11] i40e: add offset to zca_free Kevin Laatz
2019-06-24 14:32 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 05/11] ixgbe: " Kevin Laatz
2019-06-24 14:32 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 06/11] xsk: add support to allow unaligned chunk placement Kevin Laatz
2019-06-24 15:29 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 07/11] libbpf: add flags to umem config Kevin Laatz
2019-06-24 15:30 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 08/11] samples/bpf: add unaligned chunks mode support to xdpsock Kevin Laatz
2019-06-24 15:31 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 09/11] samples/bpf: add buffer recycling for unaligned chunks " Kevin Laatz
2019-06-24 15:35 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 10/11] samples/bpf: use hugepages in xdpsock app Kevin Laatz
2019-06-24 15:36 ` Björn Töpel
2019-06-20 9:09 ` [PATCH 11/11] doc/af_xdp: include unaligned chunk case Kevin Laatz
2019-06-24 15:34 ` Björn Töpel
2019-07-16 3:06 ` [PATCH v2 00/10] XDP unaligned chunk placement support Kevin Laatz
2019-07-16 3:06 ` [PATCH v2 01/10] i40e: simplify Rx buffer recycle Kevin Laatz
2019-07-19 17:19 ` [Intel-wired-lan] " Bowers, AndrewX
2019-07-16 3:06 ` [PATCH v2 02/10] ixgbe: " Kevin Laatz
2019-07-19 17:20 ` [Intel-wired-lan] " Bowers, AndrewX
2019-07-16 3:06 ` [PATCH v2 03/10] xsk: add support to allow unaligned chunk placement Kevin Laatz
2019-07-19 17:21 ` [Intel-wired-lan] " Bowers, AndrewX
2019-07-16 3:06 ` [PATCH v2 04/10] i40e: modify driver for handling offsets Kevin Laatz
2019-07-19 17:22 ` [Intel-wired-lan] " Bowers, AndrewX
2019-07-16 3:06 ` [PATCH v2 05/10] ixgbe: " Kevin Laatz
2019-07-19 17:22 ` [Intel-wired-lan] " Bowers, AndrewX
2019-07-16 3:06 ` [PATCH v2 06/10] libbpf: add flags to umem config Kevin Laatz
2019-07-16 3:06 ` [PATCH v2 07/10] samples/bpf: add unaligned chunks mode support to xdpsock Kevin Laatz
2019-07-16 3:06 ` [PATCH v2 08/10] samples/bpf: add buffer recycling for unaligned chunks " Kevin Laatz
2019-07-16 3:06 ` [PATCH v2 09/10] samples/bpf: use hugepages in xdpsock app Kevin Laatz
2019-07-16 3:06 ` [PATCH v2 10/10] doc/af_xdp: include unaligned chunk case Kevin Laatz
2019-07-23 21:08 ` [PATCH v2 00/10] XDP unaligned chunk placement support Alexei Starovoitov
2019-07-24 13:25 ` [Intel-wired-lan] " Magnus Karlsson
2019-07-25 15:43 ` Jonathan Lemon
2019-07-24 5:10 ` [PATCH bpf-next v3 00/11] " Kevin Laatz
2019-07-24 5:10 ` [PATCH bpf-next v3 01/11] i40e: simplify Rx buffer recycle Kevin Laatz
2019-07-24 5:10 ` [PATCH bpf-next v3 02/11] ixgbe: " Kevin Laatz
2019-07-24 5:10 ` [PATCH bpf-next v3 03/11] xsk: add support to allow unaligned chunk placement Kevin Laatz
2019-07-25 2:22 ` Jakub Kicinski [this message]
2019-07-25 17:01 ` Laatz, Kevin
2019-07-25 9:27 ` Maxim Mikityanskiy
2019-07-25 17:00 ` Laatz, Kevin
2019-07-25 10:08 ` Maxim Mikityanskiy
2019-07-25 15:39 ` Jonathan Lemon
2019-07-24 5:10 ` [PATCH bpf-next v3 04/11] i40e: modify driver for handling offsets Kevin Laatz
2019-07-24 5:10 ` [PATCH bpf-next v3 05/11] ixgbe: " Kevin Laatz
2019-07-24 5:10 ` [PATCH bpf-next v3 06/11] mlx5e: " Kevin Laatz
2019-07-25 10:15 ` Maxim Mikityanskiy
2019-07-25 17:00 ` Laatz, Kevin
2019-07-24 5:10 ` [PATCH bpf-next v3 07/11] libbpf: add flags to umem config Kevin Laatz
2019-07-24 5:10 ` [PATCH bpf-next v3 08/11] samples/bpf: add unaligned chunks mode support to xdpsock Kevin Laatz
2019-07-25 9:43 ` Maxim Mikityanskiy
2019-07-25 17:00 ` Laatz, Kevin
2019-07-24 5:10 ` [PATCH bpf-next v3 09/11] samples/bpf: add buffer recycling for unaligned chunks " Kevin Laatz
2019-07-24 5:10 ` [PATCH bpf-next v3 10/11] samples/bpf: use hugepages in xdpsock app Kevin Laatz
2019-07-24 5:10 ` [PATCH bpf-next v3 11/11] doc/af_xdp: include unaligned chunk case Kevin Laatz
2019-07-25 15:39 ` [PATCH bpf-next v3 00/11] XDP unaligned chunk placement support Jonathan Lemon
2019-07-25 15:56 ` Richardson, Bruce
2019-07-25 17:30 ` Jonathan Lemon
2019-07-26 8:41 ` Bruce Richardson
2019-07-30 8:53 ` [PATCH bpf-next v4 " Kevin Laatz
2019-07-30 8:53 ` [PATCH bpf-next v4 01/11] i40e: simplify Rx buffer recycle Kevin Laatz
2019-07-30 8:53 ` [PATCH bpf-next v4 02/11] ixgbe: " Kevin Laatz
2019-07-30 8:53 ` [PATCH bpf-next v4 03/11] libbpf: add flags to umem config Kevin Laatz
2019-07-31 12:45 ` [Intel-wired-lan] " Björn Töpel
2019-07-31 14:25 ` Björn Töpel
2019-08-01 6:59 ` Andrii Nakryiko
2019-08-01 7:34 ` Björn Töpel
2019-08-02 7:19 ` Andrii Nakryiko
2019-08-02 7:26 ` Björn Töpel
2019-07-30 8:53 ` [PATCH bpf-next v4 04/11] xsk: add support to allow unaligned chunk placement Kevin Laatz
2019-07-31 18:11 ` Jonathan Lemon
2019-07-30 8:53 ` [PATCH bpf-next v4 05/11] i40e: modify driver for handling offsets Kevin Laatz
2019-07-30 8:53 ` [PATCH bpf-next v4 06/11] ixgbe: " Kevin Laatz
2019-07-30 8:53 ` [PATCH bpf-next v4 07/11] mlx5e: " Kevin Laatz
2019-07-31 18:10 ` Jonathan Lemon
2019-08-01 10:05 ` Maxim Mikityanskiy
2019-08-19 14:36 ` Maxim Mikityanskiy
2019-08-19 14:47 ` Laatz, Kevin
2019-07-30 8:53 ` [PATCH bpf-next v4 08/11] samples/bpf: add unaligned chunks mode support to xdpsock Kevin Laatz
2019-07-30 8:53 ` [PATCH bpf-next v4 09/11] samples/bpf: add buffer recycling for unaligned chunks " Kevin Laatz
2019-07-31 18:26 ` Jonathan Lemon
2019-07-30 8:53 ` [PATCH bpf-next v4 10/11] samples/bpf: use hugepages in xdpsock app Kevin Laatz
2019-07-30 8:54 ` [PATCH bpf-next v4 11/11] doc/af_xdp: include unaligned chunk case Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 00/11] XDP unaligned chunk placement support Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 01/11] i40e: simplify Rx buffer recycle Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 02/11] ixgbe: " Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 03/11] xsk: add support to allow unaligned chunk placement Kevin Laatz
2019-08-22 18:43 ` Jonathan Lemon
2019-08-23 13:35 ` Laatz, Kevin
2019-08-27 7:36 ` Maxim Mikityanskiy
2019-08-22 1:44 ` [PATCH bpf-next v5 04/11] i40e: modify driver for handling offsets Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 05/11] ixgbe: " Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 06/11] mlx5e: " Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 07/11] libbpf: add flags to umem config Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 08/11] samples/bpf: add unaligned chunks mode support to xdpsock Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 09/11] samples/bpf: add buffer recycling for unaligned chunks " Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 10/11] samples/bpf: use hugepages in xdpsock app Kevin Laatz
2019-08-22 1:44 ` [PATCH bpf-next v5 11/11] doc/af_xdp: include unaligned chunk case Kevin Laatz
2019-08-27 2:25 ` [PATCH bpf-next v6 00/12] XDP unaligned chunk placement support Kevin Laatz
2019-08-27 2:25 ` [PATCH bpf-next v6 01/12] i40e: simplify Rx buffer recycle Kevin Laatz
2019-08-30 15:37 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 02/12] ixgbe: " Kevin Laatz
2019-08-30 15:39 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 03/12] xsk: add support to allow unaligned chunk placement Kevin Laatz
2019-08-30 15:41 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 04/12] i40e: modify driver for handling offsets Kevin Laatz
2019-08-30 15:42 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 05/12] ixgbe: " Kevin Laatz
2019-08-30 15:42 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 06/12] mlx5e: " Kevin Laatz
2019-08-30 15:43 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 07/12] net/mlx5e: Allow XSK frames smaller than a page Kevin Laatz
2019-08-30 15:45 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 08/12] libbpf: add flags to umem config Kevin Laatz
2019-08-30 15:46 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 09/12] samples/bpf: add unaligned chunks mode support to xdpsock Kevin Laatz
2019-08-30 15:47 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 10/12] samples/bpf: add buffer recycling for unaligned chunks " Kevin Laatz
2019-08-30 15:49 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 11/12] samples/bpf: use hugepages in xdpsock app Kevin Laatz
2019-08-30 15:51 ` Jonathan Lemon
2019-08-27 2:25 ` [PATCH bpf-next v6 12/12] doc/af_xdp: include unaligned chunk case Kevin Laatz
2019-08-30 15:51 ` Jonathan Lemon
2019-08-30 15:52 ` [PATCH bpf-next v6 00/12] XDP unaligned chunk placement support Jonathan Lemon
2019-08-30 23:29 ` Daniel Borkmann
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190724192253.00ac07bd@cakuba.netronome.com \
--to=jakub.kicinski@netronome.com \
--cc=ast@kernel.org \
--cc=bjorn.topel@intel.com \
--cc=bpf@vger.kernel.org \
--cc=bruce.richardson@intel.com \
--cc=ciara.loftus@intel.com \
--cc=daniel@iogearbox.net \
--cc=intel-wired-lan@lists.osuosl.org \
--cc=jonathan.lemon@gmail.com \
--cc=kevin.laatz@intel.com \
--cc=magnus.karlsson@intel.com \
--cc=maximmi@mellanox.com \
--cc=netdev@vger.kernel.org \
--cc=saeedm@mellanox.com \
--cc=stephen@networkplumber.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).