bpf.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH bpf-next 0/2] xsk: XSKMAP lookup improvements
@ 2019-10-24  7:19 Maciej Fijalkowski
  2019-10-24  7:19 ` [PATCH bpf-next 1/2] xsk: store struct xdp_sock as a flexible array member of the XSKMAP Maciej Fijalkowski
  2019-10-24  7:19 ` [PATCH bpf-next 2/2] xsk: implement map_gen_lookup() callback for XSKMAP Maciej Fijalkowski
  0 siblings, 2 replies; 4+ messages in thread
From: Maciej Fijalkowski @ 2019-10-24  7:19 UTC (permalink / raw)
  To: bpf, netdev, ast, daniel; +Cc: magnus.karlsson, bjorn.topel, Maciej Fijalkowski

Hi,

This small set consists of two patches from Bjorn and myself which are
optimizing the XSKMAP lookups.  In the first patch, Bjorn converts the
array of pointers to struct xdp_sock to a flexible array member of the
XSKMAP itself. The second patch, based on Bjorn's work, implements the
map_gen_lookup() callback for XSKMAP.

Based on the XDP program from tools/lib/bpf/xsk.c, where
bpf_map_lookup_elem() is explicitly called, this work yields a 5%
improvement for xdpsock's rxdrop scenario.

Thanks!


Björn Töpel (1):
  xsk: store struct xdp_sock as a flexible array member of the XSKMAP

Maciej Fijalkowski (1):
  xsk: implement map_gen_lookup() callback for XSKMAP

 kernel/bpf/xskmap.c | 74 +++++++++++++++++++++++++--------------------
 1 file changed, 42 insertions(+), 32 deletions(-)

-- 
2.20.1


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH bpf-next 1/2] xsk: store struct xdp_sock as a flexible array member of the XSKMAP
  2019-10-24  7:19 [PATCH bpf-next 0/2] xsk: XSKMAP lookup improvements Maciej Fijalkowski
@ 2019-10-24  7:19 ` Maciej Fijalkowski
  2019-10-24 20:12   ` Jakub Kicinski
  2019-10-24  7:19 ` [PATCH bpf-next 2/2] xsk: implement map_gen_lookup() callback for XSKMAP Maciej Fijalkowski
  1 sibling, 1 reply; 4+ messages in thread
From: Maciej Fijalkowski @ 2019-10-24  7:19 UTC (permalink / raw)
  To: bpf, netdev, ast, daniel; +Cc: magnus.karlsson, bjorn.topel

From: Björn Töpel <bjorn.topel@intel.com>

Prior to this commit, the XDP socket instances were stored in a
separately allocated array of the XSKMAP. Now, we store the sockets
as a flexible array member in a similar fashion to the arraymap. Doing
so, we do less pointer chasing in the lookup.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
---
 kernel/bpf/xskmap.c | 57 ++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 32 deletions(-)

diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 82a1ffe15dfa..86e3027d4605 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -11,9 +11,9 @@
 
 struct xsk_map {
 	struct bpf_map map;
-	struct xdp_sock **xsk_map;
 	struct list_head __percpu *flush_list;
 	spinlock_t lock; /* Synchronize map updates */
+	struct xdp_sock *xsk_map[];
 };
 
 int xsk_map_inc(struct xsk_map *map)
@@ -80,9 +80,11 @@ static void xsk_map_sock_delete(struct xdp_sock *xs,
 
 static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 {
+	int cpu, err, numa_node = bpf_map_attr_numa_node(attr);
+	struct bpf_map_memory mem;
 	struct xsk_map *m;
-	int cpu, err;
-	u64 cost;
+	u32 max_entries;
+	u64 cost, size;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -92,44 +94,36 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
 		return ERR_PTR(-EINVAL);
 
-	m = kzalloc(sizeof(*m), GFP_USER);
-	if (!m)
-		return ERR_PTR(-ENOMEM);
+	max_entries = attr->max_entries;
 
-	bpf_map_init_from_attr(&m->map, attr);
-	spin_lock_init(&m->lock);
+	size = struct_size(m, xsk_map, max_entries);
+	cost = size + sizeof(struct list_head) * num_possible_cpus();
 
-	cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
-	cost += sizeof(struct list_head) * num_possible_cpus();
+	err = bpf_map_charge_init(&mem, cost);
+	if (err < 0)
+		return ERR_PTR(err);
 
-	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	err = bpf_map_charge_init(&m->map.memory, cost);
-	if (err)
-		goto free_m;
+	m = bpf_map_area_alloc(size, numa_node);
+	if (!m) {
+		bpf_map_charge_finish(&mem);
+		return ERR_PTR(-ENOMEM);
+	}
 
-	err = -ENOMEM;
+	bpf_map_init_from_attr(&m->map, attr);
+	bpf_map_charge_move(&m->map.memory, &mem);
+	spin_lock_init(&m->lock);
 
 	m->flush_list = alloc_percpu(struct list_head);
-	if (!m->flush_list)
-		goto free_charge;
+	if (!m->flush_list) {
+		bpf_map_charge_finish(&m->map.memory);
+		bpf_map_area_free(m);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	for_each_possible_cpu(cpu)
 		INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));
 
-	m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
-					sizeof(struct xdp_sock *),
-					m->map.numa_node);
-	if (!m->xsk_map)
-		goto free_percpu;
 	return &m->map;
-
-free_percpu:
-	free_percpu(m->flush_list);
-free_charge:
-	bpf_map_charge_finish(&m->map.memory);
-free_m:
-	kfree(m);
-	return ERR_PTR(err);
 }
 
 static void xsk_map_free(struct bpf_map *map)
@@ -139,8 +133,7 @@ static void xsk_map_free(struct bpf_map *map)
 	bpf_clear_redirect_map(map);
 	synchronize_net();
 	free_percpu(m->flush_list);
-	bpf_map_area_free(m->xsk_map);
-	kfree(m);
+	bpf_map_area_free(m);
 }
 
 static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH bpf-next 2/2] xsk: implement map_gen_lookup() callback for XSKMAP
  2019-10-24  7:19 [PATCH bpf-next 0/2] xsk: XSKMAP lookup improvements Maciej Fijalkowski
  2019-10-24  7:19 ` [PATCH bpf-next 1/2] xsk: store struct xdp_sock as a flexible array member of the XSKMAP Maciej Fijalkowski
@ 2019-10-24  7:19 ` Maciej Fijalkowski
  1 sibling, 0 replies; 4+ messages in thread
From: Maciej Fijalkowski @ 2019-10-24  7:19 UTC (permalink / raw)
  To: bpf, netdev, ast, daniel; +Cc: magnus.karlsson, bjorn.topel, Maciej Fijalkowski

Inline the xsk_map_lookup_elem() by implementing the map_gen_lookup()
callback. This results in emitting the BPF instructions in place of the
bpf_map_lookup_elem() helper call, and thereby better performance of
BPF programs.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
---
 kernel/bpf/xskmap.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 86e3027d4605..0103405f639a 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -165,6 +165,22 @@ struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
 	return xs;
 }
 
+static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+{
+	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
+	struct bpf_insn *insn = insn_buf;
+
+	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
+	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xdp_sock *)));
+	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
+	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
+	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
+	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+	*insn++ = BPF_MOV64_IMM(ret, 0);
+	return insn - insn_buf;
+}
+
 int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
 		       struct xdp_sock *xs)
 {
@@ -305,6 +321,7 @@ const struct bpf_map_ops xsk_map_ops = {
 	.map_free = xsk_map_free,
 	.map_get_next_key = xsk_map_get_next_key,
 	.map_lookup_elem = xsk_map_lookup_elem,
+	.map_gen_lookup = xsk_map_gen_lookup,
 	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
 	.map_update_elem = xsk_map_update_elem,
 	.map_delete_elem = xsk_map_delete_elem,
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH bpf-next 1/2] xsk: store struct xdp_sock as a flexible array member of the XSKMAP
  2019-10-24  7:19 ` [PATCH bpf-next 1/2] xsk: store struct xdp_sock as a flexible array member of the XSKMAP Maciej Fijalkowski
@ 2019-10-24 20:12   ` Jakub Kicinski
  0 siblings, 0 replies; 4+ messages in thread
From: Jakub Kicinski @ 2019-10-24 20:12 UTC (permalink / raw)
  To: Maciej Fijalkowski, Toke Høiland-Jørgensen
  Cc: bpf, netdev, ast, daniel, magnus.karlsson, bjorn.topel

On Thu, 24 Oct 2019 09:19:30 +0200, Maciej Fijalkowski wrote:
> @@ -92,44 +94,36 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
>  	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
>  		return ERR_PTR(-EINVAL);
>  
> -	m = kzalloc(sizeof(*m), GFP_USER);
> -	if (!m)
> -		return ERR_PTR(-ENOMEM);
> +	max_entries = attr->max_entries;
>  
> -	bpf_map_init_from_attr(&m->map, attr);
> -	spin_lock_init(&m->lock);
> +	size = sizeof(*m) + max_entries * sizeof(m->xsk_map[0]);

Maybe the array_size() I suggested to Toke was disputable, but this is
such a clear case for struct_size()...

Otherwise you probably need an explicit (u64) cast?

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2019-10-24 20:12 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-24  7:19 [PATCH bpf-next 0/2] xsk: XSKMAP lookup improvements Maciej Fijalkowski
2019-10-24  7:19 ` [PATCH bpf-next 1/2] xsk: store struct xdp_sock as a flexible array member of the XSKMAP Maciej Fijalkowski
2019-10-24 20:12   ` Jakub Kicinski
2019-10-24  7:19 ` [PATCH bpf-next 2/2] xsk: implement map_gen_lookup() callback for XSKMAP Maciej Fijalkowski

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).