BPF Archive on lore.kernel.org
* [RFC bpf-next 0/2] introduce support for XDP programs in cpumaps
@ 2020-05-22 16:11 Lorenzo Bianconi
  2020-05-22 16:11 ` [RFC bpf-next 1/2] bpf: cpumap: add the possibility to attach an eBPF program to cpumap Lorenzo Bianconi
  2020-05-22 16:11 ` [RFC bpf-next 2/2] samples/bpf: xdp_redirect_cpu: load an eBPF program on cpu_map Lorenzo Bianconi
  0 siblings, 2 replies; 6+ messages in thread
From: Lorenzo Bianconi @ 2020-05-22 16:11 UTC (permalink / raw)
  To: bpf, netdev; +Cc: ast, davem, brouer, daniel, lorenzo.bianconi, dsahern

Similar to what David Ahern proposed in [1] for DEVMAPs, introduce the
capability to attach and run an XDP program on cpumap entries.
The idea behind this feature is to make it possible to define on which
CPU the eBPF program runs if the underlying hw does not support RSS.
While the series is functional, some bits are still missing at the
moment (e.g. XDP_REDIRECT support and defining a new attach type).
The goal of this series is to gather feedback before adding the missing
features.

[1] https://patchwork.ozlabs.org/project/netdev/cover/20200522010526.14649-1-dsahern@kernel.org/
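
For context, below is a minimal sketch of how user space could program
a cpumap entry with the value layout proposed in patch 1 (the struct
mirrors the kernel-side bpf_cpu_map_entry_value; cpu_map_fd, prog_id
and the helper name are illustrative assumptions, not part of the
series):

	#include <linux/types.h>
	#include <bpf/bpf.h>

	/* Mirrors bpf_cpu_map_entry_value from patch 1 */
	struct cpu_map_entry {
		__u32 prog_id;	/* id of an already loaded XDP program, 0 for none */
		__u32 qsize;	/* ptr_ring queue size, same meaning as today */
	};

	/* cpu_map_fd must refer to a BPF_MAP_TYPE_CPUMAP created with
	 * value_size == sizeof(struct cpu_map_entry); the old 4-byte
	 * qsize-only layout keeps working as before.
	 */
	static int add_cpu_entry(int cpu_map_fd, __u32 cpu, __u32 prog_id)
	{
		struct cpu_map_entry val = {
			.prog_id = prog_id,
			.qsize	 = 192,
		};

		return bpf_map_update_elem(cpu_map_fd, &cpu, &val, 0);
	}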

Lorenzo Bianconi (2):
  bpf: cpumap: add the possibility to attach an eBPF program to cpumap
  samples/bpf: xdp_redirect_cpu: load an eBPF program on cpu_map

 kernel/bpf/cpumap.c                 | 111 +++++++++++++++++++++++-----
 samples/bpf/xdp_redirect_cpu_kern.c |  24 +++++-
 samples/bpf/xdp_redirect_cpu_user.c |  83 +++++++++++++++++----
 3 files changed, 185 insertions(+), 33 deletions(-)

-- 
2.26.2



* [RFC bpf-next 1/2] bpf: cpumap: add the possibility to attach an eBPF program to cpumap
  2020-05-22 16:11 [RFC bpf-next 0/2] introduce support for XDP programs in cpumaps Lorenzo Bianconi
@ 2020-05-22 16:11 ` Lorenzo Bianconi
  2020-05-22 17:44   ` David Ahern
  2020-05-24 17:22   ` David Ahern
  2020-05-22 16:11 ` [RFC bpf-next 2/2] samples/bpf: xdp_redirect_cpu: load an eBPF program on cpu_map Lorenzo Bianconi
  1 sibling, 2 replies; 6+ messages in thread
From: Lorenzo Bianconi @ 2020-05-22 16:11 UTC (permalink / raw)
  To: bpf, netdev; +Cc: ast, davem, brouer, daniel, lorenzo.bianconi, dsahern

Introduce the capability to attach an eBPF program to cpumap entries.
The idea behind this feature is to make it possible to define on which
CPU the eBPF program runs if the underlying hw does not support RSS.
Currently supported verdicts are XDP_DROP and XDP_PASS.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 kernel/bpf/cpumap.c | 111 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 92 insertions(+), 19 deletions(-)

diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8b85bfddfac7..38f738220b36 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -48,17 +48,25 @@ struct xdp_bulk_queue {
 	unsigned int count;
 };
 
+struct bpf_cpu_map_entry_value {
+	u32 prog_id;
+	u32 qsize;
+};
+
 /* Struct for every remote "destination" CPU in map */
 struct bpf_cpu_map_entry {
 	u32 cpu;    /* kthread CPU and map index */
 	int map_id; /* Back reference to map */
-	u32 qsize;  /* Queue size placeholder for map lookup */
+
+	struct bpf_cpu_map_entry_value value;
 
 	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
 	struct xdp_bulk_queue __percpu *bulkq;
 
 	struct bpf_cpu_map *cmap;
 
+	struct bpf_prog *prog;
+
 	/* Queue with potential multi-producers, and single-consumer kthread */
 	struct ptr_ring *queue;
 	struct task_struct *kthread;
@@ -90,7 +98,8 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
-	    attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
+	    (attr->value_size != sizeof(struct bpf_cpu_map_entry_value) &&
+	     attr->value_size != 4) || attr->map_flags & ~BPF_F_NUMA_NODE)
 		return ERR_PTR(-EINVAL);
 
 	cmap = kzalloc(sizeof(*cmap), GFP_USER);
@@ -234,11 +243,13 @@ static int cpu_map_kthread_run(void *data)
 	 * kthread_stop signal until queue is empty.
 	 */
 	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
+		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
 		unsigned int drops = 0, sched = 0;
+		void *xdp_frames[CPUMAP_BATCH];
 		void *frames[CPUMAP_BATCH];
 		void *skbs[CPUMAP_BATCH];
-		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
-		int i, n, m;
+		int i, n, m, nframes = 0;
+		struct bpf_prog *prog;
 
 		/* Release CPU reschedule checks */
 		if (__ptr_ring_empty(rcpu->queue)) {
@@ -259,28 +270,64 @@ static int cpu_map_kthread_run(void *data)
 		 * kthread CPU pinned. Lockless access to ptr_ring
 		 * consume side valid as no-resize allowed of queue.
 		 */
-		n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);
+		n = ptr_ring_consume_batched(rcpu->queue, xdp_frames,
+					     CPUMAP_BATCH);
 
+		rcu_read_lock();
+
+		prog = READ_ONCE(rcpu->prog);
 		for (i = 0; i < n; i++) {
-			void *f = frames[i];
+			void *f = xdp_frames[i];
 			struct page *page = virt_to_page(f);
+			struct xdp_frame *xdpf;
+			struct xdp_buff xdp;
+			u32 act;
 
 			/* Bring struct page memory area to curr CPU. Read by
 			 * build_skb_around via page_is_pfmemalloc(), and when
 			 * freed written by page_frag_free call.
 			 */
 			prefetchw(page);
+			if (!prog) {
+				frames[nframes++] = xdp_frames[i];
+				continue;
+			}
+
+			xdpf = f;
+			xdp.data_hard_start = xdpf->data - xdpf->headroom;
+			xdp.data = xdpf->data;
+			xdp.data_end = xdpf->data + xdpf->len;
+			xdp.data_meta = xdpf->data - xdpf->metasize;
+			xdp.frame_sz = xdpf->frame_sz;
+			/* TODO: rxq */
+
+			act = bpf_prog_run_xdp(prog, &xdp);
+			switch (act) {
+			case XDP_PASS:
+				frames[nframes++] = xdp_frames[i];
+				break;
+			default:
+				bpf_warn_invalid_xdp_action(act);
+				/* fallthrough */
+			case XDP_DROP:
+				xdp_return_frame(xdpf);
+				drops++;
+				break;
+			}
 		}
 
-		m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs);
+		rcu_read_unlock();
+
+		m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
+					  nframes, skbs);
 		if (unlikely(m == 0)) {
-			for (i = 0; i < n; i++)
+			for (i = 0; i < nframes; i++)
 				skbs[i] = NULL; /* effect: xdp_return_frame */
-			drops = n;
+			drops = nframes;
 		}
 
 		local_bh_disable();
-		for (i = 0; i < n; i++) {
+		for (i = 0; i < nframes; i++) {
 			struct xdp_frame *xdpf = frames[i];
 			struct sk_buff *skb = skbs[i];
 			int ret;
@@ -307,8 +354,23 @@ static int cpu_map_kthread_run(void *data)
 	return 0;
 }
 
-static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
-						       int map_id)
+static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
+				      u32 prog_id)
+{
+	struct bpf_prog *prog;
+
+	/* TODO attach type */
+	prog = bpf_prog_by_id(prog_id);
+	if (IS_ERR(prog) || prog->type != BPF_PROG_TYPE_XDP)
+		return -EINVAL;
+
+	rcpu->prog = prog;
+
+	return 0;
+}
+
+static struct bpf_cpu_map_entry *
+__cpu_map_entry_alloc(u32 qsize, u32 cpu, int map_id, u32 prog_id)
 {
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
@@ -344,7 +406,7 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 
 	rcpu->cpu    = cpu;
 	rcpu->map_id = map_id;
-	rcpu->qsize  = qsize;
+	rcpu->value.qsize  = qsize;
 
 	/* Setup kthread */
 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
@@ -355,6 +417,9 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
 	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */
 
+	if (prog_id && __cpu_map_load_bpf_program(rcpu, prog_id))
+		goto free_ptr_ring;
+
 	/* Make sure kthread runs on a single CPU */
 	kthread_bind(rcpu->kthread, cpu);
 	wake_up_process(rcpu->kthread);
@@ -414,6 +479,8 @@ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
 
 	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
 	if (old_rcpu) {
+		if (old_rcpu->prog)
+			bpf_prog_put(old_rcpu->prog);
 		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
 		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
 		schedule_work(&old_rcpu->kthread_stop_wq);
@@ -437,12 +504,18 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 			       u64 map_flags)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
+	struct bpf_cpu_map_entry_value *elem;
 	struct bpf_cpu_map_entry *rcpu;
-
 	/* Array index key correspond to CPU number */
-	u32 key_cpu = *(u32 *)key;
-	/* Value is the queue size */
-	u32 qsize = *(u32 *)value;
+	u32 qsize, key_cpu = *(u32 *)key, prog_id = 0;
+
+	if (map->value_size == sizeof(*elem)) {
+		elem = (struct bpf_cpu_map_entry_value *)value;
+		qsize = elem->qsize;
+		prog_id = elem->prog_id;
+	} else {
+		qsize = *(u32 *)value;
+	}
 
 	if (unlikely(map_flags > BPF_EXIST))
 		return -EINVAL;
@@ -461,7 +534,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 		rcpu = NULL; /* Same as deleting */
 	} else {
 		/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
-		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
+		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id, prog_id);
 		if (!rcpu)
 			return -ENOMEM;
 		rcpu->cmap = cmap;
@@ -523,7 +596,7 @@ static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
 	struct bpf_cpu_map_entry *rcpu =
 		__cpu_map_lookup_elem(map, *(u32 *)key);
 
-	return rcpu ? &rcpu->qsize : NULL;
+	return rcpu ? &rcpu->value.qsize : NULL;
 }
 
 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
-- 
2.26.2



* [RFC bpf-next 2/2] samples/bpf: xdp_redirect_cpu: load an eBPF program on cpu_map
  2020-05-22 16:11 [RFC bpf-next 0/2] introduce support for XDP programs in cpumaps Lorenzo Bianconi
  2020-05-22 16:11 ` [RFC bpf-next 1/2] bpf: cpumap: add the possibility to attach an eBPF program to cpumap Lorenzo Bianconi
@ 2020-05-22 16:11 ` Lorenzo Bianconi
  1 sibling, 0 replies; 6+ messages in thread
From: Lorenzo Bianconi @ 2020-05-22 16:11 UTC (permalink / raw)
  To: bpf, netdev; +Cc: ast, davem, brouer, daniel, lorenzo.bianconi, dsahern

Extend xdp_redirect_cpu_{user,kern}.c, adding the possibility to load a
simple XDP program on each cpu_map entry that just returns XDP_PASS.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 samples/bpf/xdp_redirect_cpu_kern.c | 24 ++++++++-
 samples/bpf/xdp_redirect_cpu_user.c | 83 ++++++++++++++++++++++++-----
 2 files changed, 93 insertions(+), 14 deletions(-)

diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 2baf8db1f7e7..72d322ae295a 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -17,11 +17,17 @@
 
 #define MAX_CPUS NR_CPUS
 
+/* Value layout for a cpumap entry: XDP program id and queue size */
+struct cpu_map_entry {
+	__u32 prog_id;
+	__u32 qsize;
+};
+
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct {
 	__uint(type, BPF_MAP_TYPE_CPUMAP);
 	__uint(key_size, sizeof(u32));
-	__uint(value_size, sizeof(u32));
+	__uint(value_size, sizeof(struct cpu_map_entry));
 	__uint(max_entries, MAX_CPUS);
 } cpu_map SEC(".maps");
 
@@ -30,6 +36,9 @@ struct datarec {
 	__u64 processed;
 	__u64 dropped;
 	__u64 issue;
+	__u64 xdp_redirect;
+	__u64 xdp_pass;
+	__u64 xdp_drop;
 };
 
 /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
@@ -227,6 +236,19 @@ int  xdp_prognum0_no_touch(struct xdp_md *ctx)
 	return bpf_redirect_map(&cpu_map, cpu_dest, 0);
 }
 
+SEC("xdp_cpu_prog_pass")
+int xdp_cpumap_prog_pass(struct xdp_md *ctx)
+{
+	struct datarec *rec;
+	u32 key = 0;
+
+	rec = bpf_map_lookup_elem(&rx_cnt, &key);
+	if (rec)
+		rec->xdp_pass++;
+
+	return XDP_PASS;
+}
+
 SEC("xdp_cpu_map1_touch_data")
 int  xdp_prognum1_touch_data(struct xdp_md *ctx)
 {
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index f3468168982e..f60875f32cd9 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -30,6 +30,11 @@ static const char *__doc__ =
 
 #include "bpf_util.h"
 
+struct cpu_map_entry {
+	__u32 prog_id;
+	__u32 qsize;
+};
+
 static int ifindex = -1;
 static char ifname_buf[IF_NAMESIZE];
 static char *ifname;
@@ -156,6 +161,9 @@ struct datarec {
 	__u64 processed;
 	__u64 dropped;
 	__u64 issue;
+	__u64 xdp_redirect;
+	__u64 xdp_pass;
+	__u64 xdp_drop;
 };
 struct record {
 	__u64 timestamp;
@@ -175,6 +183,9 @@ static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
 	/* For percpu maps, userspace gets a value per possible CPU */
 	unsigned int nr_cpus = bpf_num_possible_cpus();
 	struct datarec values[nr_cpus];
+	__u64 sum_xdp_redirect = 0;
+	__u64 sum_xdp_pass = 0;
+	__u64 sum_xdp_drop = 0;
 	__u64 sum_processed = 0;
 	__u64 sum_dropped = 0;
 	__u64 sum_issue = 0;
@@ -196,10 +207,19 @@ static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
 		sum_dropped        += values[i].dropped;
 		rec->cpu[i].issue = values[i].issue;
 		sum_issue        += values[i].issue;
+		rec->cpu[i].xdp_pass = values[i].xdp_pass;
+		sum_xdp_pass += values[i].xdp_pass;
+		rec->cpu[i].xdp_drop = values[i].xdp_drop;
+		sum_xdp_drop += values[i].xdp_drop;
+		rec->cpu[i].xdp_redirect = values[i].xdp_redirect;
+		sum_xdp_redirect += values[i].xdp_redirect;
 	}
 	rec->total.processed = sum_processed;
 	rec->total.dropped   = sum_dropped;
 	rec->total.issue     = sum_issue;
+	rec->total.xdp_redirect  = sum_xdp_redirect;
+	rec->total.xdp_pass  = sum_xdp_pass;
+	rec->total.xdp_drop  = sum_xdp_drop;
 	return true;
 }
 
@@ -340,11 +360,20 @@ static void stats_print(struct stats_record *stats_rec,
 			if (pps > 0)
 				printf(fmt_rx, "XDP-RX",
 					i, pps, drop, err, errstr);
+			printf("cpu%d: xdp_pass %llu "
+			       "xdp_drop %llu xdp_redirect %llu\n",
+			       i, r->xdp_pass - p->xdp_pass,
+			       r->xdp_drop - p->xdp_drop,
+			       r->xdp_redirect - p->xdp_redirect);
 		}
 		pps  = calc_pps(&rec->total, &prev->total, t);
 		drop = calc_drop_pps(&rec->total, &prev->total, t);
 		err  = calc_errs_pps(&rec->total, &prev->total, t);
 		printf(fm2_rx, "XDP-RX", "total", pps, drop);
+		printf("xdp_pass %llu xdp_drop %llu xdp_redirect %llu\n",
+		       rec->total.xdp_pass - prev->total.xdp_pass,
+		       rec->total.xdp_drop - prev->total.xdp_drop,
+		       rec->total.xdp_redirect - prev->total.xdp_redirect);
 	}
 
 	/* cpumap enqueue stats */
@@ -495,8 +524,13 @@ static inline void swap(struct stats_record **a, struct stats_record **b)
 }
 
 static int create_cpu_entry(__u32 cpu, __u32 queue_size,
-			    __u32 avail_idx, bool new)
+			    __u32 avail_idx, int prog_id,
+			    bool new)
 {
+	struct cpu_map_entry prog_map_entry = {
+		.qsize = queue_size,
+		.prog_id = prog_id,
+	};
 	__u32 curr_cpus_count = 0;
 	__u32 key = 0;
 	int ret;
@@ -504,7 +538,7 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
 	/* Add a CPU entry to cpumap, as this allocate a cpu entry in
 	 * the kernel for the cpu.
 	 */
-	ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
+	ret = bpf_map_update_elem(cpu_map_fd, &cpu, &prog_map_entry, 0);
 	if (ret) {
 		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
 		exit(EXIT_FAIL_BPF);
@@ -561,19 +595,19 @@ static void mark_cpus_unavailable(void)
 }
 
 /* Stress cpumap management code by concurrently changing underlying cpumap */
-static void stress_cpumap(void)
+static void stress_cpumap(__u32 prog_id)
 {
 	/* Changing qsize will cause kernel to free and alloc a new
 	 * bpf_cpu_map_entry, with an associated/complicated tear-down
 	 * procedure.
 	 */
-	create_cpu_entry(1,  1024, 0, false);
-	create_cpu_entry(1,     8, 0, false);
-	create_cpu_entry(1, 16000, 0, false);
+	create_cpu_entry(1,  1024, 0, prog_id, false);
+	create_cpu_entry(1,     8, 0, prog_id, false);
+	create_cpu_entry(1, 16000, 0, prog_id, false);
 }
 
 static void stats_poll(int interval, bool use_separators, char *prog_name,
-		       bool stress_mode)
+		       bool stress_mode, __u32 prog_id)
 {
 	struct stats_record *record, *prev;
 
@@ -591,7 +625,7 @@ static void stats_poll(int interval, bool use_separators, char *prog_name,
 		stats_print(record, prev, prog_name);
 		sleep(interval);
 		if (stress_mode)
-			stress_cpumap();
+			stress_cpumap(prog_id);
 	}
 
 	free_stats_record(record);
@@ -666,16 +700,17 @@ static int init_map_fds(struct bpf_object *obj)
 
 int main(int argc, char **argv)
 {
+	__u32 info_len = sizeof(struct bpf_prog_info), cpu_map_prog_id;
 	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
 	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
 	struct bpf_prog_load_attr prog_load_attr = {
 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
 	};
+	struct bpf_program *prog, *cpu_map_prog;
 	struct bpf_prog_info info = {};
-	__u32 info_len = sizeof(info);
+	int prog_fd, cpu_map_prog_fd;
 	bool use_separators = true;
 	bool stress_mode = false;
-	struct bpf_program *prog;
 	struct bpf_object *obj;
 	char filename[256];
 	int added_cpus = 0;
@@ -683,7 +718,6 @@ int main(int argc, char **argv)
 	int interval = 2;
 	int add_cpu = -1;
 	int opt, err;
-	int prog_fd;
 	__u32 qsize;
 
 	n_cpus = get_nprocs_conf();
@@ -719,6 +753,24 @@ int main(int argc, char **argv)
 	}
 	mark_cpus_unavailable();
 
+	cpu_map_prog = bpf_object__find_program_by_title(obj,
+							 "xdp_cpu_prog_pass");
+	if (!cpu_map_prog) {
+		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
+		return EXIT_FAIL;
+	}
+	cpu_map_prog_fd = bpf_program__fd(cpu_map_prog);
+	if (cpu_map_prog_fd < 0) {
+		fprintf(stderr, "bpf_program__fd failed\n");
+		return EXIT_FAIL;
+	}
+	err = bpf_obj_get_info_by_fd(cpu_map_prog_fd, &info, &info_len);
+	if (err) {
+		printf("can't get prog info - %s\n", strerror(errno));
+		return err;
+	}
+	cpu_map_prog_id = info.id;
+
 	/* Parse commands line args */
 	while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF",
 				  long_options, &longindex)) != -1) {
@@ -763,7 +815,8 @@ int main(int argc, char **argv)
 					errno, strerror(errno));
 				goto error;
 			}
-			create_cpu_entry(add_cpu, qsize, added_cpus, true);
+			create_cpu_entry(add_cpu, qsize, added_cpus,
+					 cpu_map_prog_id, true);
 			added_cpus++;
 			break;
 		case 'q':
@@ -818,6 +871,9 @@ int main(int argc, char **argv)
 		return EXIT_FAIL_XDP;
 	}
 
+	memset(&info, 0, sizeof(info));
+	info_len = sizeof(struct bpf_prog_info);
+
 	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
 	if (err) {
 		printf("can't get prog info - %s\n", strerror(errno));
@@ -825,6 +881,7 @@ int main(int argc, char **argv)
 	}
 	prog_id = info.id;
 
-	stats_poll(interval, use_separators, prog_name, stress_mode);
+	stats_poll(interval, use_separators, prog_name, stress_mode,
+		   cpu_map_prog_id);
 	return EXIT_OK;
 }
-- 
2.26.2
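
For reference, the sample would be exercised much as before; an
illustrative invocation (flags taken from the existing getopt handling,
interface name hypothetical):

	# steer frames from eth0 into the cpumap kthread on CPU 1; the
	# entry now also carries the id of the xdp_cpu_prog_pass program
	sudo ./xdp_redirect_cpu -d eth0 -c 1

The new per-CPU xdp_pass counter printed by stats_print() then tracks
frames processed by the program attached to the cpumap entry.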



* Re: [RFC bpf-next 1/2] bpf: cpumap: add the possibility to attach an eBPF program to cpumap
  2020-05-22 16:11 ` [RFC bpf-next 1/2] bpf: cpumap: add the possibility to attach an eBPF program to cpumap Lorenzo Bianconi
@ 2020-05-22 17:44   ` David Ahern
  2020-05-22 17:54     ` Jesper Dangaard Brouer
  2020-05-24 17:22   ` David Ahern
  1 sibling, 1 reply; 6+ messages in thread
From: David Ahern @ 2020-05-22 17:44 UTC (permalink / raw)
  To: Lorenzo Bianconi, bpf, netdev
  Cc: ast, davem, brouer, daniel, lorenzo.bianconi, dsahern

On 5/22/20 10:11 AM, Lorenzo Bianconi wrote:
> @@ -259,28 +270,64 @@ static int cpu_map_kthread_run(void *data)
>  		 * kthread CPU pinned. Lockless access to ptr_ring
>  		 * consume side valid as no-resize allowed of queue.
>  		 */
> -		n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);
> +		n = ptr_ring_consume_batched(rcpu->queue, xdp_frames,
> +					     CPUMAP_BATCH);
>  
> +		rcu_read_lock();
> +
> +		prog = READ_ONCE(rcpu->prog);
>  		for (i = 0; i < n; i++) {
> -			void *f = frames[i];
> +			void *f = xdp_frames[i];
>  			struct page *page = virt_to_page(f);
> +			struct xdp_frame *xdpf;
> +			struct xdp_buff xdp;
> +			u32 act;
>  
>  			/* Bring struct page memory area to curr CPU. Read by
>  			 * build_skb_around via page_is_pfmemalloc(), and when
>  			 * freed written by page_frag_free call.
>  			 */
>  			prefetchw(page);
> +			if (!prog) {
> +				frames[nframes++] = xdp_frames[i];
> +				continue;
> +			}
> +
> +			xdpf = f;
> +			xdp.data_hard_start = xdpf->data - xdpf->headroom;
> +			xdp.data = xdpf->data;
> +			xdp.data_end = xdpf->data + xdpf->len;
> +			xdp.data_meta = xdpf->data - xdpf->metasize;
> +			xdp.frame_sz = xdpf->frame_sz;
> +			/* TODO: rxq */
> +
> +			act = bpf_prog_run_xdp(prog, &xdp);

Why not run the program in cpu_map_enqueue before converting from
xdp_buff to xdp_frame?




* Re: [RFC bpf-next 1/2] bpf: cpumap: add the possibility to attach an eBPF program to cpumap
  2020-05-22 17:44   ` David Ahern
@ 2020-05-22 17:54     ` Jesper Dangaard Brouer
  0 siblings, 0 replies; 6+ messages in thread
From: Jesper Dangaard Brouer @ 2020-05-22 17:54 UTC (permalink / raw)
  To: David Ahern
  Cc: Lorenzo Bianconi, bpf, netdev, ast, davem, daniel,
	lorenzo.bianconi, dsahern, brouer

On Fri, 22 May 2020 11:44:04 -0600
David Ahern <dsahern@gmail.com> wrote:

> On 5/22/20 10:11 AM, Lorenzo Bianconi wrote:
> > @@ -259,28 +270,64 @@ static int cpu_map_kthread_run(void *data)
> >  		 * kthread CPU pinned. Lockless access to ptr_ring
> >  		 * consume side valid as no-resize allowed of queue.
> >  		 */
> > -		n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);
> > +		n = ptr_ring_consume_batched(rcpu->queue, xdp_frames,
> > +					     CPUMAP_BATCH);
> >  
> > +		rcu_read_lock();
> > +
> > +		prog = READ_ONCE(rcpu->prog);
> >  		for (i = 0; i < n; i++) {
> > -			void *f = frames[i];
> > +			void *f = xdp_frames[i];
> >  			struct page *page = virt_to_page(f);
> > +			struct xdp_frame *xdpf;
> > +			struct xdp_buff xdp;
> > +			u32 act;
> >  
> >  			/* Bring struct page memory area to curr CPU. Read by
> >  			 * build_skb_around via page_is_pfmemalloc(), and when
> >  			 * freed written by page_frag_free call.
> >  			 */
> >  			prefetchw(page);
> > +			if (!prog) {
> > +				frames[nframes++] = xdp_frames[i];
> > +				continue;
> > +			}
> > +
> > +			xdpf = f;
> > +			xdp.data_hard_start = xdpf->data - xdpf->headroom;
> > +			xdp.data = xdpf->data;
> > +			xdp.data_end = xdpf->data + xdpf->len;
> > +			xdp.data_meta = xdpf->data - xdpf->metasize;
> > +			xdp.frame_sz = xdpf->frame_sz;
> > +			/* TODO: rxq */
> > +
> > +			act = bpf_prog_run_xdp(prog, &xdp);  
> 
> Why not run the program in cpu_map_enqueue before converting from
> xdp_buff to xdp_frame?

Because we want to run the XDP-prog on the remote-CPU.

-- 
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer



* Re: [RFC bpf-next 1/2] bpf: cpumap: add the possibility to attach an eBPF program to cpumap
  2020-05-22 16:11 ` [RFC bpf-next 1/2] bpf: cpumap: add the possibility to attach an eBPF program to cpumap Lorenzo Bianconi
  2020-05-22 17:44   ` David Ahern
@ 2020-05-24 17:22   ` David Ahern
  1 sibling, 0 replies; 6+ messages in thread
From: David Ahern @ 2020-05-24 17:22 UTC (permalink / raw)
  To: Lorenzo Bianconi, bpf, netdev
  Cc: ast, davem, brouer, daniel, lorenzo.bianconi, dsahern

On 5/22/20 10:11 AM, Lorenzo Bianconi wrote:
> @@ -307,8 +354,23 @@ static int cpu_map_kthread_run(void *data)
>  	return 0;
>  }
>  
> -static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
> -						       int map_id)
> +static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
> +				      u32 prog_id)
> +{
> +	struct bpf_prog *prog;
> +
> +	/* TODO attach type */
> +	prog = bpf_prog_by_id(prog_id);
> +	if (IS_ERR(prog) || prog->type != BPF_PROG_TYPE_XDP)
> +		return -EINVAL;

Add a check that expected_attach_type is NOT set, since this reuses
existing XDP programs, which should not have it set.
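
A minimal sketch of the suggested check on top of
__cpu_map_load_bpf_program() (assuming the field is reachable as
prog->expected_attach_type; this variant also drops the reference
taken by bpf_prog_by_id() on failure):

	prog = bpf_prog_by_id(prog_id);
	if (IS_ERR(prog))
		return -EINVAL;

	/* Reject non-XDP programs and, per the comment above, any
	 * program that was loaded with an expected_attach_type set.
	 */
	if (prog->type != BPF_PROG_TYPE_XDP || prog->expected_attach_type) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->prog = prog;
	return 0;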



