All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN
@ 2018-12-03 18:59 Stanislav Fomichev
  2018-12-03 18:59 ` [PATCH bpf-next 1/3] net/flow_dissector: move bpf case into __skb_flow_bpf_dissect Stanislav Fomichev
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Stanislav Fomichev @ 2018-12-03 18:59 UTC (permalink / raw)
  To: netdev; +Cc: davem, ast, daniel, simon.horman, Stanislav Fomichev

This patch series adds support for testing flow dissector BPF programs by
extending already existing BPF_PROG_TEST_RUN. The goal is to have a
packet as an input and `struct bpf_flow_keys' as an output. That way
we can easily test flow dissector programs' behavior.
I've also modified existing test_progs.c test to do a simple flow
dissector run as well.

* first patch introduces new __skb_flow_bpf_dissect to simplify
  sharing between __skb_flow_bpf_dissect and BPF_PROG_TEST_RUN
* second patch adds actual BPF_PROG_TEST_RUN support
* third patch adds example usage to the selftests

Stanislav Fomichev (3):
  net/flow_dissector: move bpf case into __skb_flow_bpf_dissect
  bpf: add BPF_PROG_TEST_RUN support for flow dissector
  selftests/bpf: add simple BPF_PROG_TEST_RUN examples for flow
    dissector

 include/linux/bpf.h                           |  3 +
 include/linux/skbuff.h                        |  5 ++
 net/bpf/test_run.c                            | 76 +++++++++++++++--
 net/core/filter.c                             |  1 +
 net/core/flow_dissector.c                     | 83 +++++++++++--------
 tools/testing/selftests/bpf/Makefile          |  3 +
 .../selftests/bpf/flow_dissector_load.c       | 43 ++--------
 .../selftests/bpf/flow_dissector_load.h       | 55 ++++++++++++
 tools/testing/selftests/bpf/test_progs.c      | 76 ++++++++++++++++-
 9 files changed, 265 insertions(+), 80 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/flow_dissector_load.h

-- 
2.20.0.rc1.387.gf8505762e3-goog

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH bpf-next 1/3] net/flow_dissector: move bpf case into __skb_flow_bpf_dissect
  2018-12-03 18:59 [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN Stanislav Fomichev
@ 2018-12-03 18:59 ` Stanislav Fomichev
  2018-12-03 18:59 ` [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector Stanislav Fomichev
  2018-12-03 18:59 ` [PATCH bpf-next 3/3] selftests/bpf: add simple BPF_PROG_TEST_RUN examples " Stanislav Fomichev
  2 siblings, 0 replies; 10+ messages in thread
From: Stanislav Fomichev @ 2018-12-03 18:59 UTC (permalink / raw)
  To: netdev; +Cc: davem, ast, daniel, simon.horman, Stanislav Fomichev

This way, we can reuse it for flow dissector in BPF_PROG_TEST_RUN.

No functional changes.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 include/linux/skbuff.h    |  5 +++
 net/core/flow_dissector.c | 83 +++++++++++++++++++++++----------------
 2 files changed, 54 insertions(+), 34 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 73902acf2b71..c10b27bb0cab 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1215,6 +1215,11 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
 }
 #endif
 
+struct bpf_flow_keys;
+bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
+			    const struct sk_buff *skb,
+			    struct flow_dissector *flow_dissector,
+			    struct bpf_flow_keys *flow_keys);
 bool __skb_flow_dissect(const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
 			void *target_container,
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2e8d91e54179..b465f894ec20 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -683,6 +683,39 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
 	}
 }
 
+bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
+			    const struct sk_buff *skb,
+			    struct flow_dissector *flow_dissector,
+			    struct bpf_flow_keys *flow_keys)
+{
+	struct bpf_skb_data_end cb_saved;
+	struct bpf_skb_data_end *cb;
+	u32 result;
+
+	/* Note that even though the const qualifier is discarded
+	 * throughout the execution of the BPF program, all changes(the
+	 * control block) are reverted after the BPF program returns.
+	 * Therefore, __skb_flow_dissect does not alter the skb.
+	 */
+
+	cb = (struct bpf_skb_data_end *)skb->cb;
+
+	/* Save Control Block */
+	memcpy(&cb_saved, cb, sizeof(cb_saved));
+	memset(cb, 0, sizeof(cb_saved));
+
+	/* Pass parameters to the BPF program */
+	cb->qdisc_cb.flow_keys = flow_keys;
+
+	bpf_compute_data_pointers((struct sk_buff *)skb);
+	result = BPF_PROG_RUN(prog, skb);
+
+	/* Restore state */
+	memcpy(cb, &cb_saved, sizeof(cb_saved));
+
+	return result == BPF_OK;
+}
+
 /**
  * __skb_flow_dissect - extract the flow_keys struct and return it
  * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
@@ -714,7 +747,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 	struct flow_dissector_key_vlan *key_vlan;
 	enum flow_dissect_ret fdret;
 	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
-	struct bpf_prog *attached = NULL;
 	int num_hdrs = 0;
 	u8 ip_proto = 0;
 	bool ret;
@@ -754,49 +786,32 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 					      FLOW_DISSECTOR_KEY_BASIC,
 					      target_container);
 
-	rcu_read_lock();
 	if (skb) {
+		struct bpf_flow_keys flow_keys = {};
+		struct bpf_prog *attached = NULL;
+
+		rcu_read_lock();
+
 		if (skb->dev)
 			attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
 		else if (skb->sk)
 			attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
 		else
 			WARN_ON_ONCE(1);
-	}
-	if (attached) {
-		/* Note that even though the const qualifier is discarded
-		 * throughout the execution of the BPF program, all changes(the
-		 * control block) are reverted after the BPF program returns.
-		 * Therefore, __skb_flow_dissect does not alter the skb.
-		 */
-		struct bpf_flow_keys flow_keys = {};
-		struct bpf_skb_data_end cb_saved;
-		struct bpf_skb_data_end *cb;
-		u32 result;
-
-		cb = (struct bpf_skb_data_end *)skb->cb;
 
-		/* Save Control Block */
-		memcpy(&cb_saved, cb, sizeof(cb_saved));
-		memset(cb, 0, sizeof(cb_saved));
-
-		/* Pass parameters to the BPF program */
-		cb->qdisc_cb.flow_keys = &flow_keys;
-		flow_keys.nhoff = nhoff;
-
-		bpf_compute_data_pointers((struct sk_buff *)skb);
-		result = BPF_PROG_RUN(attached, skb);
-
-		/* Restore state */
-		memcpy(cb, &cb_saved, sizeof(cb_saved));
-
-		__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
-					 target_container);
-		key_control->thoff = min_t(u16, key_control->thoff, skb->len);
+		if (attached) {
+			ret = __skb_flow_bpf_dissect(attached, skb,
+						     flow_dissector,
+						     &flow_keys);
+			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
+						 target_container);
+			key_control->thoff = min_t(u16, key_control->thoff,
+						   skb->len);
+			rcu_read_unlock();
+			return ret;
+		}
 		rcu_read_unlock();
-		return result == BPF_OK;
 	}
-	rcu_read_unlock();
 
 	if (dissector_uses_key(flow_dissector,
 			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-- 
2.20.0.rc1.387.gf8505762e3-goog

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector
  2018-12-03 18:59 [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN Stanislav Fomichev
  2018-12-03 18:59 ` [PATCH bpf-next 1/3] net/flow_dissector: move bpf case into __skb_flow_bpf_dissect Stanislav Fomichev
@ 2018-12-03 18:59 ` Stanislav Fomichev
  2018-12-03 22:28   ` Song Liu
  2018-12-03 18:59 ` [PATCH bpf-next 3/3] selftests/bpf: add simple BPF_PROG_TEST_RUN examples " Stanislav Fomichev
  2 siblings, 1 reply; 10+ messages in thread
From: Stanislav Fomichev @ 2018-12-03 18:59 UTC (permalink / raw)
  To: netdev; +Cc: davem, ast, daniel, simon.horman, Stanislav Fomichev

The input is packet data, the output is struct bpf_flow_keys. This should
make it easy to test flow dissector programs without elaborate
setup.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 include/linux/bpf.h |  3 ++
 net/bpf/test_run.c  | 76 +++++++++++++++++++++++++++++++++++++++++----
 net/core/filter.c   |  1 +
 3 files changed, 74 insertions(+), 6 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e82b7039fc66..7a572d15d5dd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr);
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr);
 
 /* an array of programs to be executed under rcu_lock.
  *
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index c89c22c49015..bfa05d31c6e3 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -14,21 +14,33 @@
 #include <net/tcp.h>
 
 static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
-		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
+		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
+		struct bpf_flow_keys *flow_keys)
 {
 	u32 ret;
 
 	preempt_disable();
 	rcu_read_lock();
 	bpf_cgroup_storage_set(storage);
-	ret = BPF_PROG_RUN(prog, ctx);
+
+	switch (prog->type) {
+	case BPF_PROG_TYPE_FLOW_DISSECTOR:
+		ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
+					     flow_keys);
+		break;
+	default:
+		ret = BPF_PROG_RUN(prog, ctx);
+		break;
+	}
+
 	rcu_read_unlock();
 	preempt_enable();
 
 	return ret;
 }
 
-static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
+static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time,
+			struct bpf_flow_keys *flow_keys)
 {
 	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
 	enum bpf_cgroup_storage_type stype;
@@ -49,7 +61,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
 		repeat = 1;
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
-		ret = bpf_test_run_one(prog, ctx, storage);
+		ret = bpf_test_run_one(prog, ctx, storage, flow_keys);
 		if (need_resched()) {
 			if (signal_pending(current))
 				break;
@@ -165,7 +177,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 		__skb_push(skb, hh_len);
 	if (is_direct_pkt_access)
 		bpf_compute_data_pointers(skb);
-	retval = bpf_test_run(prog, skb, repeat, &duration);
+	retval = bpf_test_run(prog, skb, repeat, &duration, NULL);
 	if (!is_l2) {
 		if (skb_headroom(skb) < hh_len) {
 			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -212,7 +224,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
 	xdp.rxq = &rxqueue->xdp_rxq;
 
-	retval = bpf_test_run(prog, &xdp, repeat, &duration);
+	retval = bpf_test_run(prog, &xdp, repeat, &duration, NULL);
 	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
 	    xdp.data_end != xdp.data + size)
 		size = xdp.data_end - xdp.data;
@@ -220,3 +232,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	kfree(data);
 	return ret;
 }
+
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr)
+{
+	struct bpf_flow_keys flow_keys = {};
+	u32 size = kattr->test.data_size_in;
+	u32 repeat = kattr->test.repeat;
+	u32 retval, duration;
+	struct sk_buff *skb;
+	struct sock *sk;
+	void *data;
+	int ret;
+
+	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
+		return -EINVAL;
+
+	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
+			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	sk = kzalloc(sizeof(*sk), GFP_USER);
+	if (!sk) {
+		kfree(data);
+		return -ENOMEM;
+	}
+	sock_net_set(sk, current->nsproxy->net_ns);
+	sock_init_data(NULL, sk);
+
+	skb = build_skb(data, 0);
+	if (!skb) {
+		kfree(data);
+		kfree(sk);
+		return -ENOMEM;
+	}
+	skb->sk = sk;
+
+	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+	__skb_put(skb, size);
+	skb->protocol = eth_type_trans(skb,
+				       current->nsproxy->net_ns->loopback_dev);
+	skb_reset_network_header(skb);
+
+	retval = bpf_test_run(prog, skb, repeat, &duration, &flow_keys);
+
+	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+			      retval, duration);
+	kfree_skb(skb);
+	kfree(sk);
+	return ret;
+}
diff --git a/net/core/filter.c b/net/core/filter.c
index bd0df75dc7b6..4eae6102399d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
 };
 
 const struct bpf_prog_ops flow_dissector_prog_ops = {
+	.test_run		= bpf_prog_test_run_flow_dissector,
 };
 
 int sk_detach_filter(struct sock *sk)
-- 
2.20.0.rc1.387.gf8505762e3-goog

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH bpf-next 3/3] selftests/bpf: add simple BPF_PROG_TEST_RUN examples for flow dissector
  2018-12-03 18:59 [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN Stanislav Fomichev
  2018-12-03 18:59 ` [PATCH bpf-next 1/3] net/flow_dissector: move bpf case into __skb_flow_bpf_dissect Stanislav Fomichev
  2018-12-03 18:59 ` [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector Stanislav Fomichev
@ 2018-12-03 18:59 ` Stanislav Fomichev
  2 siblings, 0 replies; 10+ messages in thread
From: Stanislav Fomichev @ 2018-12-03 18:59 UTC (permalink / raw)
  To: netdev; +Cc: davem, ast, daniel, simon.horman, Stanislav Fomichev

Use existing pkt_v4 and pkt_v6 to make sure flow_keys are what we want.

Also, add new bpf_flow_load routine (and flow_dissector_load.h header)
that loads bpf_flow.o program and does all required setup.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 tools/testing/selftests/bpf/Makefile          |  3 +
 .../selftests/bpf/flow_dissector_load.c       | 43 ++---------
 .../selftests/bpf/flow_dissector_load.h       | 55 ++++++++++++++
 tools/testing/selftests/bpf/test_progs.c      | 76 ++++++++++++++++++-
 4 files changed, 137 insertions(+), 40 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/flow_dissector_load.h

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 73aa6d8f4a2f..387763e6d137 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -19,6 +19,9 @@ all: $(TEST_CUSTOM_PROGS)
 $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
 	$(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id
 
+flow_dissector_load.c: flow_dissector_load.h
+test_run.c: flow_dissector_load.h
+
 # Order correspond to 'make run_tests' order
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
 	test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index ae8180b11d5f..77cafa66d048 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -12,6 +12,7 @@
 #include <bpf/libbpf.h>
 
 #include "bpf_rlimit.h"
+#include "flow_dissector_load.h"
 
 const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
 const char *cfg_map_name = "jmp_table";
@@ -21,46 +22,13 @@ char *cfg_path_name;
 
 static void load_and_attach_program(void)
 {
-	struct bpf_program *prog, *main_prog;
-	struct bpf_map *prog_array;
-	int i, fd, prog_fd, ret;
+	int prog_fd, ret;
 	struct bpf_object *obj;
-	int prog_array_fd;
 
-	ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj,
-			    &prog_fd);
+	ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
+			    cfg_map_name, &prog_fd);
 	if (ret)
-		error(1, 0, "bpf_prog_load %s", cfg_path_name);
-
-	main_prog = bpf_object__find_program_by_title(obj, cfg_section_name);
-	if (!main_prog)
-		error(1, 0, "bpf_object__find_program_by_title %s",
-		      cfg_section_name);
-
-	prog_fd = bpf_program__fd(main_prog);
-	if (prog_fd < 0)
-		error(1, 0, "bpf_program__fd");
-
-	prog_array = bpf_object__find_map_by_name(obj, cfg_map_name);
-	if (!prog_array)
-		error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name);
-
-	prog_array_fd = bpf_map__fd(prog_array);
-	if (prog_array_fd < 0)
-		error(1, 0, "bpf_map__fd %s", cfg_map_name);
-
-	i = 0;
-	bpf_object__for_each_program(prog, obj) {
-		fd = bpf_program__fd(prog);
-		if (fd < 0)
-			error(1, 0, "bpf_program__fd");
-
-		if (fd != prog_fd) {
-			printf("%d: %s\n", i, bpf_program__title(prog, false));
-			bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
-			++i;
-		}
-	}
+		error(1, 0, "bpf_flow_load %s", cfg_path_name);
 
 	ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
 	if (ret)
@@ -69,7 +37,6 @@ static void load_and_attach_program(void)
 	ret = bpf_object__pin(obj, cfg_pin_path);
 	if (ret)
 		error(1, 0, "bpf_object__pin %s", cfg_pin_path);
-
 }
 
 static void detach_program(void)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
new file mode 100644
index 000000000000..9f2fd38e30b0
--- /dev/null
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef FLOW_DISSECTOR_LOAD
+#define FLOW_DISSECTOR_LOAD
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+static inline int bpf_flow_load(struct bpf_object **obj,
+				const char *path,
+				const char *section_name,
+				const char *map_name,
+				int *prog_fd)
+{
+	struct bpf_program *prog, *main_prog;
+	struct bpf_map *prog_array;
+	int prog_array_fd;
+	int ret, fd, i;
+
+	ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
+			    prog_fd);
+	if (ret)
+		return ret;
+
+	main_prog = bpf_object__find_program_by_title(*obj, section_name);
+	if (!main_prog)
+		return ret;
+
+	*prog_fd = bpf_program__fd(main_prog);
+	if (*prog_fd < 0)
+		return ret;
+
+	prog_array = bpf_object__find_map_by_name(*obj, map_name);
+	if (!prog_array)
+		return ret;
+
+	prog_array_fd = bpf_map__fd(prog_array);
+	if (prog_array_fd < 0)
+		return ret;
+
+	i = 0;
+	bpf_object__for_each_program(prog, *obj) {
+		fd = bpf_program__fd(prog);
+		if (fd < 0)
+			return fd;
+
+		if (fd != *prog_fd) {
+			bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
+			++i;
+		}
+	}
+
+	return 0;
+}
+
+#endif /* FLOW_DISSECTOR_LOAD */
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 1c57abbe0945..fa8a307bba9c 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -39,6 +39,7 @@ typedef __u16 __sum16;
 #include "bpf_endian.h"
 #include "bpf_rlimit.h"
 #include "trace_helpers.h"
+#include "flow_dissector_load.h"
 
 static int error_cnt, pass_cnt;
 static bool jit_enabled;
@@ -53,7 +54,7 @@ static struct {
 } __packed pkt_v4 = {
 	.eth.h_proto = bpf_htons(ETH_P_IP),
 	.iph.ihl = 5,
-	.iph.protocol = 6,
+	.iph.protocol = IPPROTO_TCP,
 	.iph.tot_len = bpf_htons(MAGIC_BYTES),
 	.tcp.urg_ptr = 123,
 };
@@ -65,7 +66,7 @@ static struct {
 	struct tcphdr tcp;
 } __packed pkt_v6 = {
 	.eth.h_proto = bpf_htons(ETH_P_IPV6),
-	.iph.nexthdr = 6,
+	.iph.nexthdr = IPPROTO_TCP,
 	.iph.payload_len = bpf_htons(MAGIC_BYTES),
 	.tcp.urg_ptr = 123,
 };
@@ -1830,6 +1831,76 @@ static void test_queue_stack_map(int type)
 	bpf_object__close(obj);
 }
 
+#define CHECK_FLOW_KEYS(desc, got, expected)				\
+	CHECK(memcmp(&got, &expected, sizeof(got)) != 0,		\
+	      desc,							\
+	      "nhoff=%u/%u "						\
+	      "thoff=%u/%u "						\
+	      "addr_proto=0x%x/0x%x "					\
+	      "is_frag=%u/%u "						\
+	      "is_first_frag=%u/%u "					\
+	      "is_encap=%u/%u "						\
+	      "n_proto=0x%x/0x%x "					\
+	      "sport=%u/%u "						\
+	      "dport=%u/%u\n",						\
+	      got.nhoff, expected.nhoff,				\
+	      got.thoff, expected.thoff,				\
+	      got.addr_proto, expected.addr_proto,			\
+	      got.is_frag, expected.is_frag,				\
+	      got.is_first_frag, expected.is_first_frag,		\
+	      got.is_encap, expected.is_encap,				\
+	      got.n_proto, expected.n_proto,				\
+	      got.sport, expected.sport,				\
+	      got.dport, expected.dport)
+
+static struct bpf_flow_keys pkt_v4_flow_keys = {
+	.nhoff = sizeof(struct iphdr),
+	.thoff = 0,
+	.addr_proto = ETH_P_IP,
+	.ip_proto = IPPROTO_TCP,
+	.n_proto = bpf_htons(ETH_P_IP),
+};
+
+static struct bpf_flow_keys pkt_v6_flow_keys = {
+	.nhoff = sizeof(struct ipv6hdr),
+	.thoff = 0,
+	.addr_proto = ETH_P_IPV6,
+	.ip_proto = IPPROTO_TCP,
+	.n_proto = bpf_htons(ETH_P_IPV6),
+};
+
+static void test_flow_dissector(void)
+{
+	struct bpf_flow_keys flow_keys;
+	struct bpf_object *obj;
+	__u32 duration, retval;
+	int err, prog_fd;
+	__u32 size;
+
+	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
+			    "jmp_table", &prog_fd);
+	if (err) {
+		error_cnt++;
+		return;
+	}
+
+	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+				&flow_keys, &size, &retval, &duration);
+	CHECK(size != sizeof(flow_keys) || err || retval, "ipv4",
+	      "err %d errno %d retval %d duration %d size %u/%lu\n",
+	      err, errno, retval, duration, size, sizeof(flow_keys));
+	CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys);
+
+	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
+				&flow_keys, &size, &retval, &duration);
+	CHECK(size != sizeof(flow_keys) || err || retval, "ipv6",
+	      "err %d errno %d retval %d duration %d size %u/%lu\n",
+	      err, errno, retval, duration, size, sizeof(flow_keys));
+	CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
+
+	bpf_object__close(obj);
+}
+
 int main(void)
 {
 	srand(time(NULL));
@@ -1856,6 +1927,7 @@ int main(void)
 	test_reference_tracking();
 	test_queue_stack_map(QUEUE);
 	test_queue_stack_map(STACK);
+	test_flow_dissector();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
-- 
2.20.0.rc1.387.gf8505762e3-goog

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector
  2018-12-03 18:59 ` [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector Stanislav Fomichev
@ 2018-12-03 22:28   ` Song Liu
  2018-12-03 23:08     ` Stanislav Fomichev
  0 siblings, 1 reply; 10+ messages in thread
From: Song Liu @ 2018-12-03 22:28 UTC (permalink / raw)
  To: sdf
  Cc: Networking, David S . Miller, Alexei Starovoitov,
	Daniel Borkmann, simon.horman

On Mon, Dec 3, 2018 at 11:00 AM Stanislav Fomichev <sdf@google.com> wrote:
>
> The input is packet data, the output is struct bpf_flow_keys. This should
> make it easy to test flow dissector programs without elaborate
> setup.
>
> Signed-off-by: Stanislav Fomichev <sdf@google.com>
> ---
>  include/linux/bpf.h |  3 ++
>  net/bpf/test_run.c  | 76 +++++++++++++++++++++++++++++++++++++++++----
>  net/core/filter.c   |  1 +
>  3 files changed, 74 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index e82b7039fc66..7a572d15d5dd 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>                           union bpf_attr __user *uattr);
>  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
>                           union bpf_attr __user *uattr);
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +                                    const union bpf_attr *kattr,
> +                                    union bpf_attr __user *uattr);
>
>  /* an array of programs to be executed under rcu_lock.
>   *
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index c89c22c49015..bfa05d31c6e3 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -14,21 +14,33 @@
>  #include <net/tcp.h>
>
>  static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> -               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> +               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
> +               struct bpf_flow_keys *flow_keys)
>  {
>         u32 ret;
>
>         preempt_disable();
>         rcu_read_lock();
>         bpf_cgroup_storage_set(storage);
> -       ret = BPF_PROG_RUN(prog, ctx);
> +
> +       switch (prog->type) {
> +       case BPF_PROG_TYPE_FLOW_DISSECTOR:
> +               ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
> +                                            flow_keys);
> +               break;
> +       default:
> +               ret = BPF_PROG_RUN(prog, ctx);
> +               break;
> +       }
> +

Is it possible to fold the logic above into bpf_prog_test_run_flow_dissector()?
In that way, the logic flow is similar to other bpf_prog_test_run_XXX()
functions.

Thanks,
Song


>         rcu_read_unlock();
>         preempt_enable();
>
>         return ret;
>  }
>
> -static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> +static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time,
> +                       struct bpf_flow_keys *flow_keys)
>  {
>         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
>         enum bpf_cgroup_storage_type stype;
> @@ -49,7 +61,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
>                 repeat = 1;
>         time_start = ktime_get_ns();
>         for (i = 0; i < repeat; i++) {
> -               ret = bpf_test_run_one(prog, ctx, storage);
> +               ret = bpf_test_run_one(prog, ctx, storage, flow_keys);
>                 if (need_resched()) {
>                         if (signal_pending(current))
>                                 break;
> @@ -165,7 +177,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
>                 __skb_push(skb, hh_len);
>         if (is_direct_pkt_access)
>                 bpf_compute_data_pointers(skb);
> -       retval = bpf_test_run(prog, skb, repeat, &duration);
> +       retval = bpf_test_run(prog, skb, repeat, &duration, NULL);
>         if (!is_l2) {
>                 if (skb_headroom(skb) < hh_len) {
>                         int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
> @@ -212,7 +224,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>         rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
>         xdp.rxq = &rxqueue->xdp_rxq;
>
> -       retval = bpf_test_run(prog, &xdp, repeat, &duration);
> +       retval = bpf_test_run(prog, &xdp, repeat, &duration, NULL);
>         if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
>             xdp.data_end != xdp.data + size)
>                 size = xdp.data_end - xdp.data;
> @@ -220,3 +232,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>         kfree(data);
>         return ret;
>  }
> +
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +                                    const union bpf_attr *kattr,
> +                                    union bpf_attr __user *uattr)
> +{
> +       struct bpf_flow_keys flow_keys = {};
> +       u32 size = kattr->test.data_size_in;
> +       u32 repeat = kattr->test.repeat;
> +       u32 retval, duration;
> +       struct sk_buff *skb;
> +       struct sock *sk;
> +       void *data;
> +       int ret;
> +
> +       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> +               return -EINVAL;
> +
> +       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> +                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> +       if (IS_ERR(data))
> +               return PTR_ERR(data);
> +
> +       sk = kzalloc(sizeof(*sk), GFP_USER);
> +       if (!sk) {
> +               kfree(data);
> +               return -ENOMEM;
> +       }
> +       sock_net_set(sk, current->nsproxy->net_ns);
> +       sock_init_data(NULL, sk);
> +
> +       skb = build_skb(data, 0);
> +       if (!skb) {
> +               kfree(data);
> +               kfree(sk);
> +               return -ENOMEM;
> +       }
> +       skb->sk = sk;
> +
> +       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> +       __skb_put(skb, size);
> +       skb->protocol = eth_type_trans(skb,
> +                                      current->nsproxy->net_ns->loopback_dev);
> +       skb_reset_network_header(skb);
> +
> +       retval = bpf_test_run(prog, skb, repeat, &duration, &flow_keys);
> +
> +       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> +                             retval, duration);
> +       kfree_skb(skb);
> +       kfree(sk);
> +       return ret;
> +}
> diff --git a/net/core/filter.c b/net/core/filter.c
> index bd0df75dc7b6..4eae6102399d 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
>  };
>
>  const struct bpf_prog_ops flow_dissector_prog_ops = {
> +       .test_run               = bpf_prog_test_run_flow_dissector,
>  };
>
>  int sk_detach_filter(struct sock *sk)
> --
> 2.20.0.rc1.387.gf8505762e3-goog
>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector
  2018-12-03 22:28   ` Song Liu
@ 2018-12-03 23:08     ` Stanislav Fomichev
  2018-12-04  3:54       ` Stanislav Fomichev
  2018-12-04 23:25       ` Song Liu
  0 siblings, 2 replies; 10+ messages in thread
From: Stanislav Fomichev @ 2018-12-03 23:08 UTC (permalink / raw)
  To: Song Liu
  Cc: sdf, Networking, David S . Miller, Alexei Starovoitov,
	Daniel Borkmann, simon.horman

On 12/03, Song Liu wrote:
> On Mon, Dec 3, 2018 at 11:00 AM Stanislav Fomichev <sdf@google.com> wrote:
> >
> > The input is packet data, the output is struct bpf_flow_keys. This should
> > make it easy to test flow dissector programs without elaborate
> > setup.
> >
> > Signed-off-by: Stanislav Fomichev <sdf@google.com>
> > ---
> >  include/linux/bpf.h |  3 ++
> >  net/bpf/test_run.c  | 76 +++++++++++++++++++++++++++++++++++++++++----
> >  net/core/filter.c   |  1 +
> >  3 files changed, 74 insertions(+), 6 deletions(-)
> >
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index e82b7039fc66..7a572d15d5dd 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> >                           union bpf_attr __user *uattr);
> >  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> >                           union bpf_attr __user *uattr);
> > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > +                                    const union bpf_attr *kattr,
> > +                                    union bpf_attr __user *uattr);
> >
> >  /* an array of programs to be executed under rcu_lock.
> >   *
> > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> > index c89c22c49015..bfa05d31c6e3 100644
> > --- a/net/bpf/test_run.c
> > +++ b/net/bpf/test_run.c
> > @@ -14,21 +14,33 @@
> >  #include <net/tcp.h>
> >
> >  static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> > -               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> > +               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
> > +               struct bpf_flow_keys *flow_keys)
> >  {
> >         u32 ret;
> >
> >         preempt_disable();
> >         rcu_read_lock();
> >         bpf_cgroup_storage_set(storage);
> > -       ret = BPF_PROG_RUN(prog, ctx);
> > +
> > +       switch (prog->type) {
> > +       case BPF_PROG_TYPE_FLOW_DISSECTOR:
> > +               ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
> > +                                            flow_keys);
> > +               break;
> > +       default:
> > +               ret = BPF_PROG_RUN(prog, ctx);
> > +               break;
> > +       }
> > +
> 
> Is it possible to fold the logic above into bpf_prog_test_run_flow_dissector()?
> In that way, the logic flow is similar to other bpf_prog_test_run_XXX()
> functions.
I can probably do everything that __skb_flow_bpf_dissect does inside of
bpf_prog_test_run_flow_dissector (pass flow_keys in cb); that would remove
this ugly program type check down the stack. But my additional goal here was
to test __skb_flow_bpf_dissect itself as well.

Attached some possible prototype below. The only issue I see with that
approach is that we need to clear flow_keys on each test iteration.
I think in my current patch I'm actually missing a memset(&flow_keys, 0, ...)
for each iteration of __skb_flow_bpf_dissect;
I haven't tested multiple runs :-/

So I'd prefer to continue doing this prog type check (plus, add a missing
memset for flow_keys on each iteration in v2). WDYT?

--- 

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e82b7039fc66..7a572d15d5dd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr);
 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr);
 
 /* an array of programs to be executed under rcu_lock.
  *
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index c89c22c49015..04387ef1f95a 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -220,3 +220,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	kfree(data);
 	return ret;
 }
+
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+				     const union bpf_attr *kattr,
+				     union bpf_attr __user *uattr)
+{
+	struct bpf_flow_keys flow_keys = {};
+	u32 size = kattr->test.data_size_in;
+	u32 repeat = kattr->test.repeat;
+	struct bpf_skb_data_end *cb;
+	u32 retval, duration;
+	struct sk_buff *skb;
+	struct sock *sk;
+	void *data;
+	int ret;
+
+	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
+		return -EINVAL;
+
+	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
+			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	sk = kzalloc(sizeof(*sk), GFP_USER);
+	if (!sk) {
+		kfree(data);
+		return -ENOMEM;
+	}
+	sock_net_set(sk, current->nsproxy->net_ns);
+	sock_init_data(NULL, sk);
+
+	skb = build_skb(data, 0);
+	if (!skb) {
+		kfree(data);
+		kfree(sk);
+		return -ENOMEM;
+	}
+	skb->sk = sk;
+
+	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+	__skb_put(skb, size);
+	skb->protocol = eth_type_trans(skb,
+				       current->nsproxy->net_ns->loopback_dev);
+	skb_reset_network_header(skb);
+
+	cb = (struct bpf_skb_data_end *)skb->cb;
+	memset(cb, 0, sizeof(cb));
+	cb->qdisc_cb.flow_keys = &flow_keys;
+
+	retval = bpf_test_run(prog, skb, repeat, &duration);
+
+	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
+			      retval, duration);
+	kfree_skb(skb);
+	kfree(sk);
+	return ret;
+}
diff --git a/net/core/filter.c b/net/core/filter.c
index bd0df75dc7b6..4eae6102399d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
 };
 
 const struct bpf_prog_ops flow_dissector_prog_ops = {
+	.test_run		= bpf_prog_test_run_flow_dissector,
 };
 
 int sk_detach_filter(struct sock *sk)

> Thanks,
> Song
> 
> 
> >         rcu_read_unlock();
> >         preempt_enable();
> >
> >         return ret;
> >  }
> >
> > -static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> > +static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time,
> > +                       struct bpf_flow_keys *flow_keys)
> >  {
> >         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
> >         enum bpf_cgroup_storage_type stype;
> > @@ -49,7 +61,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> >                 repeat = 1;
> >         time_start = ktime_get_ns();
> >         for (i = 0; i < repeat; i++) {
> > -               ret = bpf_test_run_one(prog, ctx, storage);
> > +               ret = bpf_test_run_one(prog, ctx, storage, flow_keys);
> >                 if (need_resched()) {
> >                         if (signal_pending(current))
> >                                 break;
> > @@ -165,7 +177,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> >                 __skb_push(skb, hh_len);
> >         if (is_direct_pkt_access)
> >                 bpf_compute_data_pointers(skb);
> > -       retval = bpf_test_run(prog, skb, repeat, &duration);
> > +       retval = bpf_test_run(prog, skb, repeat, &duration, NULL);
> >         if (!is_l2) {
> >                 if (skb_headroom(skb) < hh_len) {
> >                         int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
> > @@ -212,7 +224,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> >         rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
> >         xdp.rxq = &rxqueue->xdp_rxq;
> >
> > -       retval = bpf_test_run(prog, &xdp, repeat, &duration);
> > +       retval = bpf_test_run(prog, &xdp, repeat, &duration, NULL);
> >         if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
> >             xdp.data_end != xdp.data + size)
> >                 size = xdp.data_end - xdp.data;
> > @@ -220,3 +232,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> >         kfree(data);
> >         return ret;
> >  }
> > +
> > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > +                                    const union bpf_attr *kattr,
> > +                                    union bpf_attr __user *uattr)
> > +{
> > +       struct bpf_flow_keys flow_keys = {};
> > +       u32 size = kattr->test.data_size_in;
> > +       u32 repeat = kattr->test.repeat;
> > +       u32 retval, duration;
> > +       struct sk_buff *skb;
> > +       struct sock *sk;
> > +       void *data;
> > +       int ret;
> > +
> > +       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> > +               return -EINVAL;
> > +
> > +       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> > +                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> > +       if (IS_ERR(data))
> > +               return PTR_ERR(data);
> > +
> > +       sk = kzalloc(sizeof(*sk), GFP_USER);
> > +       if (!sk) {
> > +               kfree(data);
> > +               return -ENOMEM;
> > +       }
> > +       sock_net_set(sk, current->nsproxy->net_ns);
> > +       sock_init_data(NULL, sk);
> > +
> > +       skb = build_skb(data, 0);
> > +       if (!skb) {
> > +               kfree(data);
> > +               kfree(sk);
> > +               return -ENOMEM;
> > +       }
> > +       skb->sk = sk;
> > +
> > +       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> > +       __skb_put(skb, size);
> > +       skb->protocol = eth_type_trans(skb,
> > +                                      current->nsproxy->net_ns->loopback_dev);
> > +       skb_reset_network_header(skb);
> > +
> > +       retval = bpf_test_run(prog, skb, repeat, &duration, &flow_keys);
> > +
> > +       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> > +                             retval, duration);
> > +       kfree_skb(skb);
> > +       kfree(sk);
> > +       return ret;
> > +}
> > diff --git a/net/core/filter.c b/net/core/filter.c
> > index bd0df75dc7b6..4eae6102399d 100644
> > --- a/net/core/filter.c
> > +++ b/net/core/filter.c
> > @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
> >  };
> >
> >  const struct bpf_prog_ops flow_dissector_prog_ops = {
> > +       .test_run               = bpf_prog_test_run_flow_dissector,
> >  };
> >
> >  int sk_detach_filter(struct sock *sk)
> > --
> > 2.20.0.rc1.387.gf8505762e3-goog
> >

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector
  2018-12-03 23:08     ` Stanislav Fomichev
@ 2018-12-04  3:54       ` Stanislav Fomichev
  2018-12-04 23:25       ` Song Liu
  1 sibling, 0 replies; 10+ messages in thread
From: Stanislav Fomichev @ 2018-12-04  3:54 UTC (permalink / raw)
  To: Song Liu
  Cc: sdf, Networking, David S . Miller, Alexei Starovoitov,
	Daniel Borkmann, simon.horman

On 12/03, Stanislav Fomichev wrote:
> On 12/03, Song Liu wrote:
> > On Mon, Dec 3, 2018 at 11:00 AM Stanislav Fomichev <sdf@google.com> wrote:
> > >
> > > The input is packet data, the output is struct bpf_flow_key. This should
> > > make it easy to test flow dissector programs without elaborate
> > > setup.
> > >
> > > Signed-off-by: Stanislav Fomichev <sdf@google.com>
> > > ---
> > >  include/linux/bpf.h |  3 ++
> > >  net/bpf/test_run.c  | 76 +++++++++++++++++++++++++++++++++++++++++----
> > >  net/core/filter.c   |  1 +
> > >  3 files changed, 74 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > > index e82b7039fc66..7a572d15d5dd 100644
> > > --- a/include/linux/bpf.h
> > > +++ b/include/linux/bpf.h
> > > @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >                           union bpf_attr __user *uattr);
> > >  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >                           union bpf_attr __user *uattr);
> > > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > > +                                    const union bpf_attr *kattr,
> > > +                                    union bpf_attr __user *uattr);
> > >
> > >  /* an array of programs to be executed under rcu_lock.
> > >   *
> > > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> > > index c89c22c49015..bfa05d31c6e3 100644
> > > --- a/net/bpf/test_run.c
> > > +++ b/net/bpf/test_run.c
> > > @@ -14,21 +14,33 @@
> > >  #include <net/tcp.h>
> > >
> > >  static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> > > -               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> > > +               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
> > > +               struct bpf_flow_keys *flow_keys)
> > >  {
> > >         u32 ret;
> > >
> > >         preempt_disable();
> > >         rcu_read_lock();
> > >         bpf_cgroup_storage_set(storage);
> > > -       ret = BPF_PROG_RUN(prog, ctx);
> > > +
> > > +       switch (prog->type) {
> > > +       case BPF_PROG_TYPE_FLOW_DISSECTOR:
> > > +               ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
> > > +                                            flow_keys);
> > > +               break;
> > > +       default:
> > > +               ret = BPF_PROG_RUN(prog, ctx);
> > > +               break;
> > > +       }
> > > +
> > 
> > Is it possible to fold the logic above into bpf_prog_test_run_flow_dissector()?
> > In that way, the logic flow is similar to other bpf_prog_test_run_XXX()
> > functions.
> I can probably do everything that __skb_flow_bpf_dissect does inside of
> bpf_prog_test_run_flow_dissector (pass flow_keys in cb); that would remove
> this ugly program type check down the stack. But my additional goal here was
> to test __skb_flow_bpf_dissect itself as well.
> 
> Attached some possible prototype below. The only issue I see with that
> approach is that we need to clear flow_keys on each test iteration.
> I think in my current patch I'm actually missing a memset(&flow_keys, 0, ...)
> for each iteration of __skb_flow_bpf_dissect;
> I haven't tested multiple runs :-/
> 
> So I'd prefer to continue doing this prog type check (plus, add a missing
> memset for flow_keys on each iteration in v2). WDYT?
I think I found another problem with not using __skb_flow_bpf_dissect:
I want to add some clamping to the thoff/nhoff returned by the bpf
program (to be safe); it's probably better to keep all this logic in one
place.

I can probably pass flow_keys in the skb->cb, so I don't have to add
flow_keys to the bpf_test_run_one/bpf_test_run signatures. I'll send v2
shortly, let me know if it addresses your point.

> --- 
> 
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index e82b7039fc66..7a572d15d5dd 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>  			  union bpf_attr __user *uattr);
>  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
>  			  union bpf_attr __user *uattr);
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +				     const union bpf_attr *kattr,
> +				     union bpf_attr __user *uattr);
>  
>  /* an array of programs to be executed under rcu_lock.
>   *
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index c89c22c49015..04387ef1f95a 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -220,3 +220,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>  	kfree(data);
>  	return ret;
>  }
> +
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +				     const union bpf_attr *kattr,
> +				     union bpf_attr __user *uattr)
> +{
> +	struct bpf_flow_keys flow_keys = {};
> +	u32 size = kattr->test.data_size_in;
> +	u32 repeat = kattr->test.repeat;
> +	struct bpf_skb_data_end *cb;
> +	u32 retval, duration;
> +	struct sk_buff *skb;
> +	struct sock *sk;
> +	void *data;
> +	int ret;
> +
> +	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> +		return -EINVAL;
> +
> +	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> +			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> +	if (IS_ERR(data))
> +		return PTR_ERR(data);
> +
> +	sk = kzalloc(sizeof(*sk), GFP_USER);
> +	if (!sk) {
> +		kfree(data);
> +		return -ENOMEM;
> +	}
> +	sock_net_set(sk, current->nsproxy->net_ns);
> +	sock_init_data(NULL, sk);
> +
> +	skb = build_skb(data, 0);
> +	if (!skb) {
> +		kfree(data);
> +		kfree(sk);
> +		return -ENOMEM;
> +	}
> +	skb->sk = sk;
> +
> +	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> +	__skb_put(skb, size);
> +	skb->protocol = eth_type_trans(skb,
> +				       current->nsproxy->net_ns->loopback_dev);
> +	skb_reset_network_header(skb);
> +
> +	cb = (struct bpf_skb_data_end *)skb->cb;
> +	memset(cb, 0, sizeof(cb));
> +	cb->qdisc_cb.flow_keys = &flow_keys;
> +
> +	retval = bpf_test_run(prog, skb, repeat, &duration);
> +
> +	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> +			      retval, duration);
> +	kfree_skb(skb);
> +	kfree(sk);
> +	return ret;
> +}
> diff --git a/net/core/filter.c b/net/core/filter.c
> index bd0df75dc7b6..4eae6102399d 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
>  };
>  
>  const struct bpf_prog_ops flow_dissector_prog_ops = {
> +	.test_run		= bpf_prog_test_run_flow_dissector,
>  };
>  
>  int sk_detach_filter(struct sock *sk)
> 
> > Thanks,
> > Song
> > 
> > 
> > >         rcu_read_unlock();
> > >         preempt_enable();
> > >
> > >         return ret;
> > >  }
> > >
> > > -static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> > > +static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time,
> > > +                       struct bpf_flow_keys *flow_keys)
> > >  {
> > >         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
> > >         enum bpf_cgroup_storage_type stype;
> > > @@ -49,7 +61,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> > >                 repeat = 1;
> > >         time_start = ktime_get_ns();
> > >         for (i = 0; i < repeat; i++) {
> > > -               ret = bpf_test_run_one(prog, ctx, storage);
> > > +               ret = bpf_test_run_one(prog, ctx, storage, flow_keys);
> > >                 if (need_resched()) {
> > >                         if (signal_pending(current))
> > >                                 break;
> > > @@ -165,7 +177,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >                 __skb_push(skb, hh_len);
> > >         if (is_direct_pkt_access)
> > >                 bpf_compute_data_pointers(skb);
> > > -       retval = bpf_test_run(prog, skb, repeat, &duration);
> > > +       retval = bpf_test_run(prog, skb, repeat, &duration, NULL);
> > >         if (!is_l2) {
> > >                 if (skb_headroom(skb) < hh_len) {
> > >                         int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
> > > @@ -212,7 +224,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >         rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
> > >         xdp.rxq = &rxqueue->xdp_rxq;
> > >
> > > -       retval = bpf_test_run(prog, &xdp, repeat, &duration);
> > > +       retval = bpf_test_run(prog, &xdp, repeat, &duration, NULL);
> > >         if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
> > >             xdp.data_end != xdp.data + size)
> > >                 size = xdp.data_end - xdp.data;
> > > @@ -220,3 +232,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >         kfree(data);
> > >         return ret;
> > >  }
> > > +
> > > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > > +                                    const union bpf_attr *kattr,
> > > +                                    union bpf_attr __user *uattr)
> > > +{
> > > +       struct bpf_flow_keys flow_keys = {};
> > > +       u32 size = kattr->test.data_size_in;
> > > +       u32 repeat = kattr->test.repeat;
> > > +       u32 retval, duration;
> > > +       struct sk_buff *skb;
> > > +       struct sock *sk;
> > > +       void *data;
> > > +       int ret;
> > > +
> > > +       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> > > +               return -EINVAL;
> > > +
> > > +       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> > > +                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> > > +       if (IS_ERR(data))
> > > +               return PTR_ERR(data);
> > > +
> > > +       sk = kzalloc(sizeof(*sk), GFP_USER);
> > > +       if (!sk) {
> > > +               kfree(data);
> > > +               return -ENOMEM;
> > > +       }
> > > +       sock_net_set(sk, current->nsproxy->net_ns);
> > > +       sock_init_data(NULL, sk);
> > > +
> > > +       skb = build_skb(data, 0);
> > > +       if (!skb) {
> > > +               kfree(data);
> > > +               kfree(sk);
> > > +               return -ENOMEM;
> > > +       }
> > > +       skb->sk = sk;
> > > +
> > > +       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> > > +       __skb_put(skb, size);
> > > +       skb->protocol = eth_type_trans(skb,
> > > +                                      current->nsproxy->net_ns->loopback_dev);
> > > +       skb_reset_network_header(skb);
> > > +
> > > +       retval = bpf_test_run(prog, skb, repeat, &duration, &flow_keys);
> > > +
> > > +       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> > > +                             retval, duration);
> > > +       kfree_skb(skb);
> > > +       kfree(sk);
> > > +       return ret;
> > > +}
> > > diff --git a/net/core/filter.c b/net/core/filter.c
> > > index bd0df75dc7b6..4eae6102399d 100644
> > > --- a/net/core/filter.c
> > > +++ b/net/core/filter.c
> > > @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
> > >  };
> > >
> > >  const struct bpf_prog_ops flow_dissector_prog_ops = {
> > > +       .test_run               = bpf_prog_test_run_flow_dissector,
> > >  };
> > >
> > >  int sk_detach_filter(struct sock *sk)
> > > --
> > > 2.20.0.rc1.387.gf8505762e3-goog
> > >

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector
  2018-12-03 23:08     ` Stanislav Fomichev
  2018-12-04  3:54       ` Stanislav Fomichev
@ 2018-12-04 23:25       ` Song Liu
  2018-12-04 23:36         ` Stanislav Fomichev
  1 sibling, 1 reply; 10+ messages in thread
From: Song Liu @ 2018-12-04 23:25 UTC (permalink / raw)
  To: sdf
  Cc: Stanislav Fomichev, Networking, David S . Miller,
	Alexei Starovoitov, Daniel Borkmann, simon.horman

On Mon, Dec 3, 2018 at 3:08 PM Stanislav Fomichev <sdf@fomichev.me> wrote:
>
> On 12/03, Song Liu wrote:
> > On Mon, Dec 3, 2018 at 11:00 AM Stanislav Fomichev <sdf@google.com> wrote:
> > >
> > > The input is packet data, the output is struct bpf_flow_key. This should
> > > make it easy to test flow dissector programs without elaborate
> > > setup.
> > >
> > > Signed-off-by: Stanislav Fomichev <sdf@google.com>
> > > ---
> > >  include/linux/bpf.h |  3 ++
> > >  net/bpf/test_run.c  | 76 +++++++++++++++++++++++++++++++++++++++++----
> > >  net/core/filter.c   |  1 +
> > >  3 files changed, 74 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > > index e82b7039fc66..7a572d15d5dd 100644
> > > --- a/include/linux/bpf.h
> > > +++ b/include/linux/bpf.h
> > > @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >                           union bpf_attr __user *uattr);
> > >  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >                           union bpf_attr __user *uattr);
> > > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > > +                                    const union bpf_attr *kattr,
> > > +                                    union bpf_attr __user *uattr);
> > >
> > >  /* an array of programs to be executed under rcu_lock.
> > >   *
> > > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> > > index c89c22c49015..bfa05d31c6e3 100644
> > > --- a/net/bpf/test_run.c
> > > +++ b/net/bpf/test_run.c
> > > @@ -14,21 +14,33 @@
> > >  #include <net/tcp.h>
> > >
> > >  static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> > > -               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> > > +               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
> > > +               struct bpf_flow_keys *flow_keys)
> > >  {
> > >         u32 ret;
> > >
> > >         preempt_disable();
> > >         rcu_read_lock();
> > >         bpf_cgroup_storage_set(storage);
> > > -       ret = BPF_PROG_RUN(prog, ctx);
> > > +
> > > +       switch (prog->type) {
> > > +       case BPF_PROG_TYPE_FLOW_DISSECTOR:
> > > +               ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
> > > +                                            flow_keys);
> > > +               break;
> > > +       default:
> > > +               ret = BPF_PROG_RUN(prog, ctx);
> > > +               break;
> > > +       }
> > > +
> >
> > Is it possible to fold the logic above into bpf_prog_test_run_flow_dissector()?
> > In that way, the logic flow is similar to other bpf_prog_test_run_XXX()
> > functions.
> I can probably do everything that __skb_flow_bpf_dissect does inside of
> bpf_prog_test_run_flow_dissector (pass flow_keys in cb); that would remove
> this ugly program type check down the stack. But my additional goal here was
> to test __skb_flow_bpf_dissect itself as well.
>
> Attached some possible prototype below. The only issue I see with that
> approach is that we need to clear flow_keys on each test iteration.
> I think in my current patch I'm actually missing a memset(&flow_keys, 0, ...)
> for each iteration of __skb_flow_bpf_dissect;
> I haven't tested multiple runs :-/
>
> So I'd prefer to continue doing this prog type check (plus, add a missing
> memset for flow_keys on each iteration in v2). WDYT?

I think v2 is still using the original logic? Actually, after a second
thought, I think
it is OK to change bpf_test_run_one() (unless Alexei and/or Daniel have better
suggestions).

Thanks,
Song

>
> ---
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index e82b7039fc66..7a572d15d5dd 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>                           union bpf_attr __user *uattr);
>  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
>                           union bpf_attr __user *uattr);
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +                                    const union bpf_attr *kattr,
> +                                    union bpf_attr __user *uattr);
>
>  /* an array of programs to be executed under rcu_lock.
>   *
> diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> index c89c22c49015..04387ef1f95a 100644
> --- a/net/bpf/test_run.c
> +++ b/net/bpf/test_run.c
> @@ -220,3 +220,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
>         kfree(data);
>         return ret;
>  }
> +
> +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> +                                    const union bpf_attr *kattr,
> +                                    union bpf_attr __user *uattr)
> +{
> +       struct bpf_flow_keys flow_keys = {};
> +       u32 size = kattr->test.data_size_in;
> +       u32 repeat = kattr->test.repeat;
> +       struct bpf_skb_data_end *cb;
> +       u32 retval, duration;
> +       struct sk_buff *skb;
> +       struct sock *sk;
> +       void *data;
> +       int ret;
> +
> +       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> +               return -EINVAL;
> +
> +       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> +                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> +       if (IS_ERR(data))
> +               return PTR_ERR(data);
> +
> +       sk = kzalloc(sizeof(*sk), GFP_USER);
> +       if (!sk) {
> +               kfree(data);
> +               return -ENOMEM;
> +       }
> +       sock_net_set(sk, current->nsproxy->net_ns);
> +       sock_init_data(NULL, sk);
> +
> +       skb = build_skb(data, 0);
> +       if (!skb) {
> +               kfree(data);
> +               kfree(sk);
> +               return -ENOMEM;
> +       }
> +       skb->sk = sk;
> +
> +       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> +       __skb_put(skb, size);
> +       skb->protocol = eth_type_trans(skb,
> +                                      current->nsproxy->net_ns->loopback_dev);
> +       skb_reset_network_header(skb);
> +
> +       cb = (struct bpf_skb_data_end *)skb->cb;
> +       memset(cb, 0, sizeof(cb));
> +       cb->qdisc_cb.flow_keys = &flow_keys;
> +
> +       retval = bpf_test_run(prog, skb, repeat, &duration);
> +
> +       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> +                             retval, duration);
> +       kfree_skb(skb);
> +       kfree(sk);
> +       return ret;
> +}
> diff --git a/net/core/filter.c b/net/core/filter.c
> index bd0df75dc7b6..4eae6102399d 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
>  };
>
>  const struct bpf_prog_ops flow_dissector_prog_ops = {
> +       .test_run               = bpf_prog_test_run_flow_dissector,
>  };
>
>  int sk_detach_filter(struct sock *sk)
>
> > Thanks,
> > Song
> >
> >
> > >         rcu_read_unlock();
> > >         preempt_enable();
> > >
> > >         return ret;
> > >  }
> > >
> > > -static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> > > +static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time,
> > > +                       struct bpf_flow_keys *flow_keys)
> > >  {
> > >         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
> > >         enum bpf_cgroup_storage_type stype;
> > > @@ -49,7 +61,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> > >                 repeat = 1;
> > >         time_start = ktime_get_ns();
> > >         for (i = 0; i < repeat; i++) {
> > > -               ret = bpf_test_run_one(prog, ctx, storage);
> > > +               ret = bpf_test_run_one(prog, ctx, storage, flow_keys);
> > >                 if (need_resched()) {
> > >                         if (signal_pending(current))
> > >                                 break;
> > > @@ -165,7 +177,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >                 __skb_push(skb, hh_len);
> > >         if (is_direct_pkt_access)
> > >                 bpf_compute_data_pointers(skb);
> > > -       retval = bpf_test_run(prog, skb, repeat, &duration);
> > > +       retval = bpf_test_run(prog, skb, repeat, &duration, NULL);
> > >         if (!is_l2) {
> > >                 if (skb_headroom(skb) < hh_len) {
> > >                         int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
> > > @@ -212,7 +224,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >         rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
> > >         xdp.rxq = &rxqueue->xdp_rxq;
> > >
> > > -       retval = bpf_test_run(prog, &xdp, repeat, &duration);
> > > +       retval = bpf_test_run(prog, &xdp, repeat, &duration, NULL);
> > >         if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
> > >             xdp.data_end != xdp.data + size)
> > >                 size = xdp.data_end - xdp.data;
> > > @@ -220,3 +232,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > >         kfree(data);
> > >         return ret;
> > >  }
> > > +
> > > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > > +                                    const union bpf_attr *kattr,
> > > +                                    union bpf_attr __user *uattr)
> > > +{
> > > +       struct bpf_flow_keys flow_keys = {};
> > > +       u32 size = kattr->test.data_size_in;
> > > +       u32 repeat = kattr->test.repeat;
> > > +       u32 retval, duration;
> > > +       struct sk_buff *skb;
> > > +       struct sock *sk;
> > > +       void *data;
> > > +       int ret;
> > > +
> > > +       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> > > +               return -EINVAL;
> > > +
> > > +       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> > > +                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> > > +       if (IS_ERR(data))
> > > +               return PTR_ERR(data);
> > > +
> > > +       sk = kzalloc(sizeof(*sk), GFP_USER);
> > > +       if (!sk) {
> > > +               kfree(data);
> > > +               return -ENOMEM;
> > > +       }
> > > +       sock_net_set(sk, current->nsproxy->net_ns);
> > > +       sock_init_data(NULL, sk);
> > > +
> > > +       skb = build_skb(data, 0);
> > > +       if (!skb) {
> > > +               kfree(data);
> > > +               kfree(sk);
> > > +               return -ENOMEM;
> > > +       }
> > > +       skb->sk = sk;
> > > +
> > > +       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> > > +       __skb_put(skb, size);
> > > +       skb->protocol = eth_type_trans(skb,
> > > +                                      current->nsproxy->net_ns->loopback_dev);
> > > +       skb_reset_network_header(skb);
> > > +
> > > +       retval = bpf_test_run(prog, skb, repeat, &duration, &flow_keys);
> > > +
> > > +       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> > > +                             retval, duration);
> > > +       kfree_skb(skb);
> > > +       kfree(sk);
> > > +       return ret;
> > > +}
> > > diff --git a/net/core/filter.c b/net/core/filter.c
> > > index bd0df75dc7b6..4eae6102399d 100644
> > > --- a/net/core/filter.c
> > > +++ b/net/core/filter.c
> > > @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
> > >  };
> > >
> > >  const struct bpf_prog_ops flow_dissector_prog_ops = {
> > > +       .test_run               = bpf_prog_test_run_flow_dissector,
> > >  };
> > >
> > >  int sk_detach_filter(struct sock *sk)
> > > --
> > > 2.20.0.rc1.387.gf8505762e3-goog
> > >

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector
  2018-12-04 23:25       ` Song Liu
@ 2018-12-04 23:36         ` Stanislav Fomichev
  0 siblings, 0 replies; 10+ messages in thread
From: Stanislav Fomichev @ 2018-12-04 23:36 UTC (permalink / raw)
  To: Song Liu
  Cc: Stanislav Fomichev, Networking, David S . Miller,
	Alexei Starovoitov, Daniel Borkmann, simon.horman

On 12/04, Song Liu wrote:
> On Mon, Dec 3, 2018 at 3:08 PM Stanislav Fomichev <sdf@fomichev.me> wrote:
> >
> > On 12/03, Song Liu wrote:
> > > On Mon, Dec 3, 2018 at 11:00 AM Stanislav Fomichev <sdf@google.com> wrote:
> > > >
> > > > The input is packet data, the output is struct bpf_flow_key. This should
> > > > make it easy to test flow dissector programs without elaborate
> > > > setup.
> > > >
> > > > Signed-off-by: Stanislav Fomichev <sdf@google.com>
> > > > ---
> > > >  include/linux/bpf.h |  3 ++
> > > >  net/bpf/test_run.c  | 76 +++++++++++++++++++++++++++++++++++++++++----
> > > >  net/core/filter.c   |  1 +
> > > >  3 files changed, 74 insertions(+), 6 deletions(-)
> > > >
> > > > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > > > index e82b7039fc66..7a572d15d5dd 100644
> > > > --- a/include/linux/bpf.h
> > > > +++ b/include/linux/bpf.h
> > > > @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > > >                           union bpf_attr __user *uattr);
> > > >  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> > > >                           union bpf_attr __user *uattr);
> > > > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > > > +                                    const union bpf_attr *kattr,
> > > > +                                    union bpf_attr __user *uattr);
> > > >
> > > >  /* an array of programs to be executed under rcu_lock.
> > > >   *
> > > > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> > > > index c89c22c49015..bfa05d31c6e3 100644
> > > > --- a/net/bpf/test_run.c
> > > > +++ b/net/bpf/test_run.c
> > > > @@ -14,21 +14,33 @@
> > > >  #include <net/tcp.h>
> > > >
> > > >  static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
> > > > -               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
> > > > +               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
> > > > +               struct bpf_flow_keys *flow_keys)
> > > >  {
> > > >         u32 ret;
> > > >
> > > >         preempt_disable();
> > > >         rcu_read_lock();
> > > >         bpf_cgroup_storage_set(storage);
> > > > -       ret = BPF_PROG_RUN(prog, ctx);
> > > > +
> > > > +       switch (prog->type) {
> > > > +       case BPF_PROG_TYPE_FLOW_DISSECTOR:
> > > > +               ret = __skb_flow_bpf_dissect(prog, ctx, &flow_keys_dissector,
> > > > +                                            flow_keys);
> > > > +               break;
> > > > +       default:
> > > > +               ret = BPF_PROG_RUN(prog, ctx);
> > > > +               break;
> > > > +       }
> > > > +
> > >
> > > Is it possible to fold the logic above into bpf_prog_test_run_flow_dissector()?
> > > In that way, the logic flow is similar to other bpf_prog_test_run_XXX()
> > > functions.
> > I can probably do everything that __skb_flow_bpf_dissect does inside of
> > bpf_prog_test_run_flow_dissector (pass flow_keys in cb); that would remove
> > this ugly program type check down the stack. But my additional goal here was
> > to test __skb_flow_bpf_dissect itself as well.
> >
> > Attached some possible prototype below. The only issue I see with that
> > approach is that we need to clear flow_keys on each test iteration.
> > I think in my current patch I'm actually missing a memset(&flow_keys, 0, ...)
> > for each iteration of __skb_flow_bpf_dissect;
> > I haven't tested multiple runs :-/
> >
> > So I'd prefer to continue doing this prog type check (plus, add a missing
> > memset for flow_keys on each iteration in v2). WDYT?
> 
> I think v2 is still using original logic? Actually, after a second
> thought, I think
In a sense, it does. v2 still passes flow_dissector to the lower level
bpf_test_run_one routine, it just passes it in the skb->cb, not
explicitly in the function arguments.

> it is OK to change bpf_test_run_one() (unless Alexei and/or Daniel have better
> suggestions).
The only preference I have is that I want to call __skb_flow_bpf_dissect
from bpf_test_run_one (i.e. do NOT do elaborate setup in
bpf_prog_test_run_flow_dissector so BPF_PROG_RUN in bpf_test_run_one
just magically works).

I don't really care how we pass flow_keys there (skb->cb vs plumbing the
argument) :-)

> 
> Thanks,
> Song
> 
> >
> > ---
> >
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index e82b7039fc66..7a572d15d5dd 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -373,6 +373,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> >                           union bpf_attr __user *uattr);
> >  int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> >                           union bpf_attr __user *uattr);
> > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > +                                    const union bpf_attr *kattr,
> > +                                    union bpf_attr __user *uattr);
> >
> >  /* an array of programs to be executed under rcu_lock.
> >   *
> > diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
> > index c89c22c49015..04387ef1f95a 100644
> > --- a/net/bpf/test_run.c
> > +++ b/net/bpf/test_run.c
> > @@ -220,3 +220,60 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> >         kfree(data);
> >         return ret;
> >  }
> > +
> > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > +                                    const union bpf_attr *kattr,
> > +                                    union bpf_attr __user *uattr)
> > +{
> > +       struct bpf_flow_keys flow_keys = {};
> > +       u32 size = kattr->test.data_size_in;
> > +       u32 repeat = kattr->test.repeat;
> > +       struct bpf_skb_data_end *cb;
> > +       u32 retval, duration;
> > +       struct sk_buff *skb;
> > +       struct sock *sk;
> > +       void *data;
> > +       int ret;
> > +
> > +       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> > +               return -EINVAL;
> > +
> > +       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> > +                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> > +       if (IS_ERR(data))
> > +               return PTR_ERR(data);
> > +
> > +       sk = kzalloc(sizeof(*sk), GFP_USER);
> > +       if (!sk) {
> > +               kfree(data);
> > +               return -ENOMEM;
> > +       }
> > +       sock_net_set(sk, current->nsproxy->net_ns);
> > +       sock_init_data(NULL, sk);
> > +
> > +       skb = build_skb(data, 0);
> > +       if (!skb) {
> > +               kfree(data);
> > +               kfree(sk);
> > +               return -ENOMEM;
> > +       }
> > +       skb->sk = sk;
> > +
> > +       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> > +       __skb_put(skb, size);
> > +       skb->protocol = eth_type_trans(skb,
> > +                                      current->nsproxy->net_ns->loopback_dev);
> > +       skb_reset_network_header(skb);
> > +
> > +       cb = (struct bpf_skb_data_end *)skb->cb;
> > +       memset(cb, 0, sizeof(*cb));
> > +       cb->qdisc_cb.flow_keys = &flow_keys;
> > +
> > +       retval = bpf_test_run(prog, skb, repeat, &duration);
> > +
> > +       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> > +                             retval, duration);
> > +       kfree_skb(skb);
> > +       kfree(sk);
> > +       return ret;
> > +}
> > diff --git a/net/core/filter.c b/net/core/filter.c
> > index bd0df75dc7b6..4eae6102399d 100644
> > --- a/net/core/filter.c
> > +++ b/net/core/filter.c
> > @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
> >  };
> >
> >  const struct bpf_prog_ops flow_dissector_prog_ops = {
> > +       .test_run               = bpf_prog_test_run_flow_dissector,
> >  };
> >
> >  int sk_detach_filter(struct sock *sk)
> >
> > > Thanks,
> > > Song
> > >
> > >
> > > >         rcu_read_unlock();
> > > >         preempt_enable();
> > > >
> > > >         return ret;
> > > >  }
> > > >
> > > > -static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> > > > +static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time,
> > > > +                       struct bpf_flow_keys *flow_keys)
> > > >  {
> > > >         struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
> > > >         enum bpf_cgroup_storage_type stype;
> > > > @@ -49,7 +61,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
> > > >                 repeat = 1;
> > > >         time_start = ktime_get_ns();
> > > >         for (i = 0; i < repeat; i++) {
> > > > -               ret = bpf_test_run_one(prog, ctx, storage);
> > > > +               ret = bpf_test_run_one(prog, ctx, storage, flow_keys);
> > > >                 if (need_resched()) {
> > > >                         if (signal_pending(current))
> > > >                                 break;
> > > > @@ -165,7 +177,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
> > > >                 __skb_push(skb, hh_len);
> > > >         if (is_direct_pkt_access)
> > > >                 bpf_compute_data_pointers(skb);
> > > > -       retval = bpf_test_run(prog, skb, repeat, &duration);
> > > > +       retval = bpf_test_run(prog, skb, repeat, &duration, NULL);
> > > >         if (!is_l2) {
> > > >                 if (skb_headroom(skb) < hh_len) {
> > > >                         int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
> > > > @@ -212,7 +224,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > > >         rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
> > > >         xdp.rxq = &rxqueue->xdp_rxq;
> > > >
> > > > -       retval = bpf_test_run(prog, &xdp, repeat, &duration);
> > > > +       retval = bpf_test_run(prog, &xdp, repeat, &duration, NULL);
> > > >         if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
> > > >             xdp.data_end != xdp.data + size)
> > > >                 size = xdp.data_end - xdp.data;
> > > > @@ -220,3 +232,55 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
> > > >         kfree(data);
> > > >         return ret;
> > > >  }
> > > > +
> > > > +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
> > > > +                                    const union bpf_attr *kattr,
> > > > +                                    union bpf_attr __user *uattr)
> > > > +{
> > > > +       struct bpf_flow_keys flow_keys = {};
> > > > +       u32 size = kattr->test.data_size_in;
> > > > +       u32 repeat = kattr->test.repeat;
> > > > +       u32 retval, duration;
> > > > +       struct sk_buff *skb;
> > > > +       struct sock *sk;
> > > > +       void *data;
> > > > +       int ret;
> > > > +
> > > > +       if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
> > > > +               return -EINVAL;
> > > > +
> > > > +       data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
> > > > +                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> > > > +       if (IS_ERR(data))
> > > > +               return PTR_ERR(data);
> > > > +
> > > > +       sk = kzalloc(sizeof(*sk), GFP_USER);
> > > > +       if (!sk) {
> > > > +               kfree(data);
> > > > +               return -ENOMEM;
> > > > +       }
> > > > +       sock_net_set(sk, current->nsproxy->net_ns);
> > > > +       sock_init_data(NULL, sk);
> > > > +
> > > > +       skb = build_skb(data, 0);
> > > > +       if (!skb) {
> > > > +               kfree(data);
> > > > +               kfree(sk);
> > > > +               return -ENOMEM;
> > > > +       }
> > > > +       skb->sk = sk;
> > > > +
> > > > +       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
> > > > +       __skb_put(skb, size);
> > > > +       skb->protocol = eth_type_trans(skb,
> > > > +                                      current->nsproxy->net_ns->loopback_dev);
> > > > +       skb_reset_network_header(skb);
> > > > +
> > > > +       retval = bpf_test_run(prog, skb, repeat, &duration, &flow_keys);
> > > > +
> > > > +       ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
> > > > +                             retval, duration);
> > > > +       kfree_skb(skb);
> > > > +       kfree(sk);
> > > > +       return ret;
> > > > +}
> > > > diff --git a/net/core/filter.c b/net/core/filter.c
> > > > index bd0df75dc7b6..4eae6102399d 100644
> > > > --- a/net/core/filter.c
> > > > +++ b/net/core/filter.c
> > > > @@ -7657,6 +7657,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = {
> > > >  };
> > > >
> > > >  const struct bpf_prog_ops flow_dissector_prog_ops = {
> > > > +       .test_run               = bpf_prog_test_run_flow_dissector,
> > > >  };
> > > >
> > > >  int sk_detach_filter(struct sock *sk)
> > > > --
> > > > 2.20.0.rc1.387.gf8505762e3-goog
> > > >

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH bpf-next 3/3] selftests/bpf: add simple BPF_PROG_TEST_RUN examples for flow dissector
  2019-01-22 21:23 [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN Stanislav Fomichev
@ 2019-01-22 21:23 ` Stanislav Fomichev
  0 siblings, 0 replies; 10+ messages in thread
From: Stanislav Fomichev @ 2019-01-22 21:23 UTC (permalink / raw)
  To: netdev; +Cc: davem, ast, daniel, Stanislav Fomichev

Use existing pkt_v4 and pkt_v6 to make sure flow_keys are what we want.

Also, add new bpf_flow_load routine (and flow_dissector_load.h header)
that loads bpf_flow.o program and does all required setup.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
 tools/testing/selftests/bpf/Makefile          |  3 +
 .../selftests/bpf/flow_dissector_load.c       | 43 ++--------
 .../selftests/bpf/flow_dissector_load.h       | 55 +++++++++++++
 tools/testing/selftests/bpf/test_progs.c      | 78 ++++++++++++++++++-
 4 files changed, 139 insertions(+), 40 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/flow_dissector_load.h

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 70229de510f5..ade67470a152 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -19,6 +19,9 @@ all: $(TEST_CUSTOM_PROGS)
 $(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c
 	$(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id
 
+flow_dissector_load.c: flow_dissector_load.h
+test_run.c: flow_dissector_load.h
+
 # Order correspond to 'make run_tests' order
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
 	test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index ae8180b11d5f..77cafa66d048 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -12,6 +12,7 @@
 #include <bpf/libbpf.h>
 
 #include "bpf_rlimit.h"
+#include "flow_dissector_load.h"
 
 const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
 const char *cfg_map_name = "jmp_table";
@@ -21,46 +22,13 @@ char *cfg_path_name;
 
 static void load_and_attach_program(void)
 {
-	struct bpf_program *prog, *main_prog;
-	struct bpf_map *prog_array;
-	int i, fd, prog_fd, ret;
+	int prog_fd, ret;
 	struct bpf_object *obj;
-	int prog_array_fd;
 
-	ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj,
-			    &prog_fd);
+	ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
+			    cfg_map_name, &prog_fd);
 	if (ret)
-		error(1, 0, "bpf_prog_load %s", cfg_path_name);
-
-	main_prog = bpf_object__find_program_by_title(obj, cfg_section_name);
-	if (!main_prog)
-		error(1, 0, "bpf_object__find_program_by_title %s",
-		      cfg_section_name);
-
-	prog_fd = bpf_program__fd(main_prog);
-	if (prog_fd < 0)
-		error(1, 0, "bpf_program__fd");
-
-	prog_array = bpf_object__find_map_by_name(obj, cfg_map_name);
-	if (!prog_array)
-		error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name);
-
-	prog_array_fd = bpf_map__fd(prog_array);
-	if (prog_array_fd < 0)
-		error(1, 0, "bpf_map__fd %s", cfg_map_name);
-
-	i = 0;
-	bpf_object__for_each_program(prog, obj) {
-		fd = bpf_program__fd(prog);
-		if (fd < 0)
-			error(1, 0, "bpf_program__fd");
-
-		if (fd != prog_fd) {
-			printf("%d: %s\n", i, bpf_program__title(prog, false));
-			bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
-			++i;
-		}
-	}
+		error(1, 0, "bpf_flow_load %s", cfg_path_name);
 
 	ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0);
 	if (ret)
@@ -69,7 +37,6 @@ static void load_and_attach_program(void)
 	ret = bpf_object__pin(obj, cfg_pin_path);
 	if (ret)
 		error(1, 0, "bpf_object__pin %s", cfg_pin_path);
-
 }
 
 static void detach_program(void)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
new file mode 100644
index 000000000000..41dd6959feb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef FLOW_DISSECTOR_LOAD
+#define FLOW_DISSECTOR_LOAD
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+static inline int bpf_flow_load(struct bpf_object **obj,
+				const char *path,
+				const char *section_name,
+				const char *map_name,
+				int *prog_fd)
+{
+	struct bpf_program *prog, *main_prog;
+	struct bpf_map *prog_array;
+	int prog_array_fd;
+	int ret, fd, i;
+
+	ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
+			    prog_fd);
+	if (ret)
+		return ret;
+
+	main_prog = bpf_object__find_program_by_title(*obj, section_name);
+	if (!main_prog)
+		return ret;
+
+	*prog_fd = bpf_program__fd(main_prog);
+	if (*prog_fd < 0)
+		return ret;
+
+	prog_array = bpf_object__find_map_by_name(*obj, map_name);
+	if (!prog_array)
+		return ret;
+
+	prog_array_fd = bpf_map__fd(prog_array);
+	if (prog_array_fd < 0)
+		return ret;
+
+	i = 0;
+	bpf_object__for_each_program(prog, *obj) {
+		fd = bpf_program__fd(prog);
+		if (fd < 0)
+			return fd;
+
+		if (fd != *prog_fd) {
+			bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY);
+			++i;
+		}
+	}
+
+	return 0;
+}
+
+#endif /* FLOW_DISSECTOR_LOAD */
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 126fc624290d..5f46680d4ad4 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -39,6 +39,7 @@ typedef __u16 __sum16;
 #include "bpf_endian.h"
 #include "bpf_rlimit.h"
 #include "trace_helpers.h"
+#include "flow_dissector_load.h"
 
 static int error_cnt, pass_cnt;
 static bool jit_enabled;
@@ -53,9 +54,10 @@ static struct {
 } __packed pkt_v4 = {
 	.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
 	.iph.ihl = 5,
-	.iph.protocol = 6,
+	.iph.protocol = IPPROTO_TCP,
 	.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
 	.tcp.urg_ptr = 123,
+	.tcp.doff = 5,
 };
 
 /* ipv6 test vector */
@@ -65,9 +67,10 @@ static struct {
 	struct tcphdr tcp;
 } __packed pkt_v6 = {
 	.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
-	.iph.nexthdr = 6,
+	.iph.nexthdr = IPPROTO_TCP,
 	.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
 	.tcp.urg_ptr = 123,
+	.tcp.doff = 5,
 };
 
 #define _CHECK(condition, tag, duration, format...) ({			\
@@ -1882,6 +1885,76 @@ static void test_queue_stack_map(int type)
 	bpf_object__close(obj);
 }
 
+#define CHECK_FLOW_KEYS(desc, got, expected)				\
+	CHECK(memcmp(&got, &expected, sizeof(got)) != 0,		\
+	      desc,							\
+	      "nhoff=%u/%u "						\
+	      "thoff=%u/%u "						\
+	      "addr_proto=0x%x/0x%x "					\
+	      "is_frag=%u/%u "						\
+	      "is_first_frag=%u/%u "					\
+	      "is_encap=%u/%u "						\
+	      "n_proto=0x%x/0x%x "					\
+	      "sport=%u/%u "						\
+	      "dport=%u/%u\n",						\
+	      got.nhoff, expected.nhoff,				\
+	      got.thoff, expected.thoff,				\
+	      got.addr_proto, expected.addr_proto,			\
+	      got.is_frag, expected.is_frag,				\
+	      got.is_first_frag, expected.is_first_frag,		\
+	      got.is_encap, expected.is_encap,				\
+	      got.n_proto, expected.n_proto,				\
+	      got.sport, expected.sport,				\
+	      got.dport, expected.dport)
+
+static struct bpf_flow_keys pkt_v4_flow_keys = {
+	.nhoff = 0,
+	.thoff = sizeof(struct iphdr),
+	.addr_proto = ETH_P_IP,
+	.ip_proto = IPPROTO_TCP,
+	.n_proto = bpf_htons(ETH_P_IP),
+};
+
+static struct bpf_flow_keys pkt_v6_flow_keys = {
+	.nhoff = 0,
+	.thoff = sizeof(struct ipv6hdr),
+	.addr_proto = ETH_P_IPV6,
+	.ip_proto = IPPROTO_TCP,
+	.n_proto = bpf_htons(ETH_P_IPV6),
+};
+
+static void test_flow_dissector(void)
+{
+	struct bpf_flow_keys flow_keys;
+	struct bpf_object *obj;
+	__u32 duration, retval;
+	int err, prog_fd;
+	__u32 size;
+
+	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
+			    "jmp_table", &prog_fd);
+	if (err) {
+		error_cnt++;
+		return;
+	}
+
+	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
+				&flow_keys, &size, &retval, &duration);
+	CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4",
+	      "err %d errno %d retval %d duration %d size %u/%lu\n",
+	      err, errno, retval, duration, size, sizeof(flow_keys));
+	CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys);
+
+	err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6),
+				&flow_keys, &size, &retval, &duration);
+	CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6",
+	      "err %d errno %d retval %d duration %d size %u/%lu\n",
+	      err, errno, retval, duration, size, sizeof(flow_keys));
+	CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
+
+	bpf_object__close(obj);
+}
+
 int main(void)
 {
 	srand(time(NULL));
@@ -1909,6 +1982,7 @@ int main(void)
 	test_reference_tracking();
 	test_queue_stack_map(QUEUE);
 	test_queue_stack_map(STACK);
+	test_flow_dissector();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
-- 
2.20.1.321.g9e740568ce-goog


^ permalink raw reply related	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2019-01-22 21:23 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-12-03 18:59 [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN Stanislav Fomichev
2018-12-03 18:59 ` [PATCH bpf-next 1/3] net/flow_dissector: move bpf case into __skb_flow_bpf_dissect Stanislav Fomichev
2018-12-03 18:59 ` [PATCH bpf-next 2/3] bpf: add BPF_PROG_TEST_RUN support for flow dissector Stanislav Fomichev
2018-12-03 22:28   ` Song Liu
2018-12-03 23:08     ` Stanislav Fomichev
2018-12-04  3:54       ` Stanislav Fomichev
2018-12-04 23:25       ` Song Liu
2018-12-04 23:36         ` Stanislav Fomichev
2018-12-03 18:59 ` [PATCH bpf-next 3/3] selftests/bpf: add simple BPF_PROG_TEST_RUN examples " Stanislav Fomichev
2019-01-22 21:23 [PATCH bpf-next 0/3] support flow dissector in BPF_PROG_TEST_RUN Stanislav Fomichev
2019-01-22 21:23 ` [PATCH bpf-next 3/3] selftests/bpf: add simple BPF_PROG_TEST_RUN examples for flow dissector Stanislav Fomichev

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.