* [LTP] [PATCH 0/2] [RFC] BPF testing
@ 2019-07-24  8:03 Richard Palethorpe
  2019-07-24  8:03 ` [LTP] [PATCH 1/2] Essential headers for BPF map creation Richard Palethorpe
                   ` (4 more replies)
  0 siblings, 5 replies; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-24  8:03 UTC (permalink / raw)
  To: ltp

Hello,

This patch set introduces a very basic test which kicks the tires of the bpf
system call. It doesn't actually load an eBPF program; I will create another
test for that. However, I have some concerns which I will discuss while doing
that.

There are already extensive BPF tests in the kernel selftests. These appear to
be quite complex and test a variety of functionality. They are also far less
structured than LTP's modern tests and are tied to the kernel tree, which makes
using them in QA a pain. There are also some tests in the BCC project, which
may test the kernel as a byproduct.

So there are a number of options which are not necessarily mutually exclusive:

1) Port (some of) the selftests to the LTP.
2) Port the LTP library to the selftests.
3) Focus the LTP's BPF tests on reproducing specific high impact bugs.

This patch set copies in the necessary headers so that we have zero external
dependencies. I will also use raw byte code for the program test, which is at
least acceptable for trivial programs. This means we do not need BCC, Clang/LLVM
with eBPF support, or matching kernel sources to generate offsets into internal
structures.

For the time being at least, my preference would be for (3), while avoiding
taking on any dependencies, so that these tests are run by users who are mostly
ignorant of BPF but are still exposed to critical bugs in the BPF stack.
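
As a rough sketch of what the dependency-free approach looks like in a test
(error handling trimmed, see the patches for the real thing):

	#include <string.h>
	#include "tst_test.h"
	#include "lapi/bpf.h"

	static void example(void)
	{
		union bpf_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;	/* array maps are indexed by u32 */
		attr.value_size = 8;
		attr.max_entries = 1;

		fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
		if (fd < 0)
			tst_brk(TBROK | TERRNO, "BPF_MAP_CREATE");

		SAFE_CLOSE(fd);
	}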

Richard Palethorpe (2):
  Essential headers for BPF map creation
  BPF: Sanity check creating and updating maps

 include/lapi/bpf.h                        | 242 ++++++++++++++++++++++
 include/lapi/syscalls/aarch64.in          |   1 +
 include/lapi/syscalls/i386.in             |   1 +
 include/lapi/syscalls/s390.in             |   1 +
 include/lapi/syscalls/sparc.in            |   1 +
 include/lapi/syscalls/x86_64.in           |   1 +
 runtest/syscalls                          |   2 +
 testcases/kernel/syscalls/bpf/.gitignore  |   1 +
 testcases/kernel/syscalls/bpf/Makefile    |  10 +
 testcases/kernel/syscalls/bpf/bpf_map01.c | 138 ++++++++++++
 10 files changed, 398 insertions(+)
 create mode 100644 include/lapi/bpf.h
 create mode 100644 testcases/kernel/syscalls/bpf/.gitignore
 create mode 100644 testcases/kernel/syscalls/bpf/Makefile
 create mode 100644 testcases/kernel/syscalls/bpf/bpf_map01.c

-- 
2.22.0



* [LTP] [PATCH 1/2] Essential headers for BPF map creation
  2019-07-24  8:03 [LTP] [PATCH 0/2] [RFC] BPF testing Richard Palethorpe
@ 2019-07-24  8:03 ` Richard Palethorpe
  2019-07-24  9:27   ` Petr Vorel
  2019-07-24  8:03 ` [LTP] [PATCH 2/2] BPF: Sanity check creating and updating maps Richard Palethorpe
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-24  8:03 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 include/lapi/bpf.h               | 242 +++++++++++++++++++++++++++++++
 include/lapi/syscalls/aarch64.in |   1 +
 include/lapi/syscalls/i386.in    |   1 +
 include/lapi/syscalls/s390.in    |   1 +
 include/lapi/syscalls/sparc.in   |   1 +
 include/lapi/syscalls/x86_64.in  |   1 +
 6 files changed, 247 insertions(+)
 create mode 100644 include/lapi/bpf.h

diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h
new file mode 100644
index 000000000..369de0175
--- /dev/null
+++ b/include/lapi/bpf.h
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
+ *
+ * Essential Extended Berkeley Packet Filter (eBPF) headers
+ *
+ * Mostly copied/adapted from linux/bpf.h and libbpf so that we can perform
+ * some eBPF testing without any external dependencies.
+ */
+
+#ifndef BPF_H
+# define BPF_H
+
+#include <stdint.h>
+
+#include "lapi/syscalls.h"
+
+/* Start copy from linux/bpf.h */
+enum bpf_cmd {
+	BPF_MAP_CREATE,
+	BPF_MAP_LOOKUP_ELEM,
+	BPF_MAP_UPDATE_ELEM,
+	BPF_MAP_DELETE_ELEM,
+	BPF_MAP_GET_NEXT_KEY,
+	BPF_PROG_LOAD,
+	BPF_OBJ_PIN,
+	BPF_OBJ_GET,
+	BPF_PROG_ATTACH,
+	BPF_PROG_DETACH,
+	BPF_PROG_TEST_RUN,
+	BPF_PROG_GET_NEXT_ID,
+	BPF_MAP_GET_NEXT_ID,
+	BPF_PROG_GET_FD_BY_ID,
+	BPF_MAP_GET_FD_BY_ID,
+	BPF_OBJ_GET_INFO_BY_FD,
+	BPF_PROG_QUERY,
+	BPF_RAW_TRACEPOINT_OPEN,
+	BPF_BTF_LOAD,
+	BPF_BTF_GET_FD_BY_ID,
+	BPF_TASK_FD_QUERY,
+	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
+	BPF_MAP_FREEZE,
+};
+
+enum bpf_map_type {
+	BPF_MAP_TYPE_UNSPEC,
+	BPF_MAP_TYPE_HASH,
+	BPF_MAP_TYPE_ARRAY,
+	BPF_MAP_TYPE_PROG_ARRAY,
+	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	BPF_MAP_TYPE_PERCPU_HASH,
+	BPF_MAP_TYPE_PERCPU_ARRAY,
+	BPF_MAP_TYPE_STACK_TRACE,
+	BPF_MAP_TYPE_CGROUP_ARRAY,
+	BPF_MAP_TYPE_LRU_HASH,
+	BPF_MAP_TYPE_LRU_PERCPU_HASH,
+	BPF_MAP_TYPE_LPM_TRIE,
+	BPF_MAP_TYPE_ARRAY_OF_MAPS,
+	BPF_MAP_TYPE_HASH_OF_MAPS,
+	BPF_MAP_TYPE_DEVMAP,
+	BPF_MAP_TYPE_SOCKMAP,
+	BPF_MAP_TYPE_CPUMAP,
+	BPF_MAP_TYPE_XSKMAP,
+	BPF_MAP_TYPE_SOCKHASH,
+	BPF_MAP_TYPE_CGROUP_STORAGE,
+	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+	BPF_MAP_TYPE_QUEUE,
+	BPF_MAP_TYPE_STACK,
+	BPF_MAP_TYPE_SK_STORAGE,
+};
+
+#define BPF_OBJ_NAME_LEN 16U
+
+#define BPF_ANY		0 /* create new element or update existing */
+#define BPF_NOEXIST	1 /* create new element if it didn't exist */
+#define BPF_EXIST	2 /* update existing element */
+#define BPF_F_LOCK	4 /* spin_lock-ed map_lookup/map_update */
+
+#define aligned_uint64_t uint64_t __attribute__((aligned(8)))
+
+union bpf_attr {
+	struct { /* anonymous struct used by BPF_MAP_CREATE command */
+		uint32_t	map_type;	/* one of enum bpf_map_type */
+		uint32_t	key_size;	/* size of key in bytes */
+		uint32_t	value_size;	/* size of value in bytes */
+		uint32_t	max_entries;	/* max number of entries in a map */
+		uint32_t	map_flags;	/* BPF_MAP_CREATE related
+					 * flags defined above.
+					 */
+		uint32_t	inner_map_fd;	/* fd pointing to the inner map */
+		uint32_t	numa_node;	/* numa node (effective only if
+					 * BPF_F_NUMA_NODE is set).
+					 */
+		char	map_name[BPF_OBJ_NAME_LEN];
+		uint32_t	map_ifindex;	/* ifindex of netdev to create on */
+		uint32_t	btf_fd;		/* fd pointing to a BTF type data */
+		uint32_t	btf_key_type_id;	/* BTF type_id of the key */
+		uint32_t	btf_value_type_id;	/* BTF type_id of the value */
+	};
+
+	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
+		uint32_t		map_fd;
+		aligned_uint64_t	key;
+		union {
+			aligned_uint64_t value;
+			aligned_uint64_t next_key;
+		};
+		uint64_t		flags;
+	};
+
+	struct { /* anonymous struct used by BPF_PROG_LOAD command */
+		uint32_t		prog_type;	/* one of enum bpf_prog_type */
+		uint32_t		insn_cnt;
+		aligned_uint64_t	insns;
+		aligned_uint64_t	license;
+		uint32_t		log_level;	/* verbosity level of verifier */
+		uint32_t		log_size;	/* size of user buffer */
+		aligned_uint64_t	log_buf;	/* user supplied buffer */
+		uint32_t		kern_version;	/* not used */
+		uint32_t		prog_flags;
+		char		prog_name[BPF_OBJ_NAME_LEN];
+		uint32_t		prog_ifindex;	/* ifindex of netdev to prep for */
+		/* For some prog types expected attach type must be known at
+		 * load time to verify attach type specific parts of prog
+		 * (context accesses, allowed helpers, etc).
+		 */
+		uint32_t		expected_attach_type;
+		uint32_t		prog_btf_fd;	/* fd pointing to BTF type data */
+		uint32_t		func_info_rec_size;	/* userspace bpf_func_info size */
+		aligned_uint64_t	func_info;	/* func info */
+		uint32_t		func_info_cnt;	/* number of bpf_func_info records */
+		uint32_t		line_info_rec_size;	/* userspace bpf_line_info size */
+		aligned_uint64_t	line_info;	/* line info */
+		uint32_t		line_info_cnt;	/* number of bpf_line_info records */
+	};
+
+	struct { /* anonymous struct used by BPF_OBJ_* commands */
+		aligned_uint64_t	pathname;
+		uint32_t		bpf_fd;
+		uint32_t		file_flags;
+	};
+
+	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+		uint32_t		target_fd;	/* container object to attach to */
+		uint32_t		attach_bpf_fd;	/* eBPF program to attach */
+		uint32_t		attach_type;
+		uint32_t		attach_flags;
+	};
+
+	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
+		uint32_t		prog_fd;
+		uint32_t		retval;
+		uint32_t		data_size_in;	/* input: len of data_in */
+		uint32_t		data_size_out;	/* input/output: len of data_out
+						 *   returns ENOSPC if data_out
+						 *   is too small.
+						 */
+		aligned_uint64_t	data_in;
+		aligned_uint64_t	data_out;
+		uint32_t		repeat;
+		uint32_t		duration;
+		uint32_t		ctx_size_in;	/* input: len of ctx_in */
+		uint32_t		ctx_size_out;	/* input/output: len of ctx_out
+						 *   returns ENOSPC if ctx_out
+						 *   is too small.
+						 */
+		aligned_uint64_t	ctx_in;
+		aligned_uint64_t	ctx_out;
+	} test;
+
+	struct { /* anonymous struct used by BPF_*_GET_*_ID */
+		union {
+			uint32_t		start_id;
+			uint32_t		prog_id;
+			uint32_t		map_id;
+			uint32_t		btf_id;
+		};
+		uint32_t		next_id;
+		uint32_t		open_flags;
+	};
+
+	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
+		uint32_t		bpf_fd;
+		uint32_t		info_len;
+		aligned_uint64_t	info;
+	} info;
+
+	struct { /* anonymous struct used by BPF_PROG_QUERY command */
+		uint32_t		target_fd;	/* container object to query */
+		uint32_t		attach_type;
+		uint32_t		query_flags;
+		uint32_t		attach_flags;
+		aligned_uint64_t	prog_ids;
+		uint32_t		prog_cnt;
+	} query;
+
+	struct {
+		uint64_t name;
+		uint32_t prog_fd;
+	} raw_tracepoint;
+
+	struct { /* anonymous struct for BPF_BTF_LOAD */
+		aligned_uint64_t	btf;
+		aligned_uint64_t	btf_log_buf;
+		uint32_t		btf_size;
+		uint32_t		btf_log_size;
+		uint32_t		btf_log_level;
+	};
+
+	struct {
+		uint32_t		pid;		/* input: pid */
+		uint32_t		fd;		/* input: fd */
+		uint32_t		flags;		/* input: flags */
+		uint32_t		buf_len;	/* input/output: buf len */
+		aligned_uint64_t	buf;		/* input/output:
+						 *   tp_name for tracepoint
+						 *   symbol for kprobe
+						 *   filename for uprobe
+						 */
+		uint32_t		prog_id;	/* output: prod_id */
+		uint32_t		fd_type;	/* output: BPF_FD_TYPE_* */
+		uint64_t		probe_offset;	/* output: probe_offset */
+		uint64_t		probe_addr;	/* output: probe_addr */
+	} task_fd_query;
+} __attribute__((aligned(8)));
+
+/* End copy from linux/bpf.h */
+
+/* Start copy from tools/lib/bpf  */
+inline uint64_t ptr_to_u64(const void *ptr)
+{
+	return (uint64_t) (unsigned long) ptr;
+}
+
+inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
+{
+	return tst_syscall(__NR_bpf, cmd, attr, size);
+}
+/* End copy from tools/lib/bpf */
+
+#endif	/* BPF_H */
diff --git a/include/lapi/syscalls/aarch64.in b/include/lapi/syscalls/aarch64.in
index 7db6e281c..0e00641bc 100644
--- a/include/lapi/syscalls/aarch64.in
+++ b/include/lapi/syscalls/aarch64.in
@@ -258,6 +258,7 @@ process_vm_writev 271
 kcmp 272
 getrandom 278
 memfd_create 279
+bpf 280
 userfaultfd 282
 membarrier 283
 execveat 281
diff --git a/include/lapi/syscalls/i386.in b/include/lapi/syscalls/i386.in
index 02f3955ba..87ab46933 100644
--- a/include/lapi/syscalls/i386.in
+++ b/include/lapi/syscalls/i386.in
@@ -340,6 +340,7 @@ sched_getattr 352
 renameat2 354
 getrandom 355
 memfd_create 356
+bpf 357
 execveat 358
 userfaultfd 374
 membarrier 375
diff --git a/include/lapi/syscalls/s390.in b/include/lapi/syscalls/s390.in
index c304ef4b7..d3f7eb1f6 100644
--- a/include/lapi/syscalls/s390.in
+++ b/include/lapi/syscalls/s390.in
@@ -331,6 +331,7 @@ sched_getattr 346
 renameat2 347
 getrandom 349
 memfd_create 350
+bpf 351
 userfaultfd 355
 membarrier 356
 execveat 354
diff --git a/include/lapi/syscalls/sparc.in b/include/lapi/syscalls/sparc.in
index ab7204663..94a672428 100644
--- a/include/lapi/syscalls/sparc.in
+++ b/include/lapi/syscalls/sparc.in
@@ -336,6 +336,7 @@ kcmp 341
 renameat2 345
 getrandom 347
 memfd_create 348
+bpf 349
 membarrier 351
 userfaultfd 352
 execveat 350
diff --git a/include/lapi/syscalls/x86_64.in b/include/lapi/syscalls/x86_64.in
index fdb414c10..b1cbd4f2f 100644
--- a/include/lapi/syscalls/x86_64.in
+++ b/include/lapi/syscalls/x86_64.in
@@ -307,6 +307,7 @@ sched_getattr 315
 renameat2 316
 getrandom 318
 memfd_create 319
+bpf 321
 execveat 322
 userfaultfd 323
 membarrier 324
-- 
2.22.0



* [LTP] [PATCH 2/2] BPF: Sanity check creating and updating maps
  2019-07-24  8:03 [LTP] [PATCH 0/2] [RFC] BPF testing Richard Palethorpe
  2019-07-24  8:03 ` [LTP] [PATCH 1/2] Essential headers for BPF map creation Richard Palethorpe
@ 2019-07-24  8:03 ` Richard Palethorpe
  2019-07-24  9:18   ` Petr Vorel
  2019-07-24  9:30 ` [LTP] [PATCH 0/2] [RFC] BPF testing Petr Vorel
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-24  8:03 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 runtest/syscalls                          |   2 +
 testcases/kernel/syscalls/bpf/.gitignore  |   1 +
 testcases/kernel/syscalls/bpf/Makefile    |  10 ++
 testcases/kernel/syscalls/bpf/bpf_map01.c | 138 ++++++++++++++++++++++
 4 files changed, 151 insertions(+)
 create mode 100644 testcases/kernel/syscalls/bpf/.gitignore
 create mode 100644 testcases/kernel/syscalls/bpf/Makefile
 create mode 100644 testcases/kernel/syscalls/bpf/bpf_map01.c

diff --git a/runtest/syscalls b/runtest/syscalls
index 67dfed661..46880ee1d 100644
--- a/runtest/syscalls
+++ b/runtest/syscalls
@@ -32,6 +32,8 @@ bind01 bind01
 bind02 bind02
 bind03 bind03
 
+bpf_map01 bpf_map01
+
 brk01 brk01
 
 capget01 capget01
diff --git a/testcases/kernel/syscalls/bpf/.gitignore b/testcases/kernel/syscalls/bpf/.gitignore
new file mode 100644
index 000000000..f33532484
--- /dev/null
+++ b/testcases/kernel/syscalls/bpf/.gitignore
@@ -0,0 +1 @@
+bpf_map01
diff --git a/testcases/kernel/syscalls/bpf/Makefile b/testcases/kernel/syscalls/bpf/Makefile
new file mode 100644
index 000000000..990c8c83c
--- /dev/null
+++ b/testcases/kernel/syscalls/bpf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2019 Linux Test Project
+
+top_srcdir		?= ../../../..
+
+include $(top_srcdir)/include/mk/testcases.mk
+
+CFLAGS			+= -D_GNU_SOURCE
+
+include $(top_srcdir)/include/mk/generic_leaf_target.mk
diff --git a/testcases/kernel/syscalls/bpf/bpf_map01.c b/testcases/kernel/syscalls/bpf/bpf_map01.c
new file mode 100644
index 000000000..81416b790
--- /dev/null
+++ b/testcases/kernel/syscalls/bpf/bpf_map01.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
+ *
+ * Trivial Extended Berkeley Packet Filter (eBPF) test.
+ *
+ * Sanity check creating and updating maps.
+ */
+
+#include <limits.h>
+#include <string.h>
+
+#include "config.h"
+#include "tst_test.h"
+#include "lapi/bpf.h"
+
+#define KEY_SZ 8
+#define VAL_SZ 1024
+
+struct map_type {
+	uint32_t id;
+	char *name;
+};
+
+static const struct map_type map_types[] = {
+	{BPF_MAP_TYPE_HASH, "hash"},
+	{BPF_MAP_TYPE_ARRAY, "array"}
+};
+
+static void *key;
+static void *val0;
+static void *val1;
+
+static void setup(void)
+{
+	key = SAFE_MALLOC(KEY_SZ);
+	memset(key, 0, (size_t) KEY_SZ);
+	val0 = SAFE_MALLOC(VAL_SZ);
+	val1 = SAFE_MALLOC(VAL_SZ);
+	memset(val1, 0, (size_t) VAL_SZ);
+}
+
+void run(unsigned int n)
+{
+	int fd, i;
+	union bpf_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_type = map_types[n].id;
+	attr.key_size = n == 0 ? KEY_SZ : 4;
+	attr.value_size = VAL_SZ;
+	attr.max_entries = 1;
+
+	if ((fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr))) == -1) {
+		tst_brk(TFAIL | TERRNO, "Failed to create %s map",
+			map_types[n].name);
+	} else {
+		tst_res(TPASS, "Created %s map", map_types[n].name);
+	}
+
+	if (n == 0)
+		memcpy(key, "12345678", KEY_SZ);
+	else
+		memset(key, 0, 4);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_fd = fd;
+	attr.key = ptr_to_u64(key);
+	attr.value = ptr_to_u64(val1);
+
+	TEST(bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
+	if (n == 0) {
+		if (TST_RET != -1 || TST_ERR != ENOENT) {
+			tst_res(TFAIL | TTERRNO,
+				"Empty hash map lookup should fail with ENOENT");
+		} else {
+			tst_res(TPASS | TTERRNO, "Empty hash map lookup");
+		}
+	} else if (TST_RET != -1) {
+		for (i = 0;;) {
+			if (*(char *) val1 != 0) {
+				tst_res(TFAIL,
+					"Preallocated array map val not zero");
+			} else if (++i >= VAL_SZ) {
+				tst_res(TPASS,
+					"Preallocated array map lookup");
+				break;
+			}
+		}
+	} else {
+		tst_res(TFAIL | TERRNO, "Preallocated array map lookup");
+	}
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_fd = fd;
+	attr.key = ptr_to_u64(key);
+	attr.value = ptr_to_u64(val0);
+	attr.flags = BPF_ANY;
+
+	TEST(bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)));
+	if (TST_RET == -1) {
+		tst_brk(TFAIL | TTERRNO,
+			"Update %s map element",
+			map_types[n].name);
+	} else {
+		tst_res(TPASS,
+			"Update %s map element",
+			map_types[n].name);
+	}
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_fd = fd;
+	attr.key = ptr_to_u64(key);
+	attr.value = ptr_to_u64(val1);
+
+	TEST(bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
+	if (TST_RET == -1) {
+		tst_res(TFAIL | TTERRNO,
+			"%s map lookup missing",
+			map_types[n].name);
+	} else if (memcmp(val0, val1, (size_t) VAL_SZ)) {
+		tst_res(TFAIL,
+			"%s map lookup returned different value",
+			map_types[n].name);
+	} else {
+		tst_res(TPASS, "%s map lookup", map_types[n].name);
+	}
+
+	SAFE_CLOSE(fd);
+}
+
+static struct tst_test test = {
+	.tcnt = 2,
+	.needs_root = 1,
+	.setup = setup,
+	.test = run,
+	.min_kver = "3.18",
+};
-- 
2.22.0



* [LTP] [PATCH 2/2] BPF: Sanity check creating and updating maps
  2019-07-24  8:03 ` [LTP] [PATCH 2/2] BPF: Sanity check creating and updating maps Richard Palethorpe
@ 2019-07-24  9:18   ` Petr Vorel
  2019-07-24 12:15     ` Richard Palethorpe
  0 siblings, 1 reply; 14+ messages in thread
From: Petr Vorel @ 2019-07-24  9:18 UTC (permalink / raw)
  To: ltp

Hi Richie,

> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
Reviewed-by: Petr Vorel <pvorel@suse.cz>

LGTM with minor comments.

...
> diff --git a/testcases/kernel/syscalls/bpf/bpf_map01.c b/testcases/kernel/syscalls/bpf/bpf_map01.c
...
> +#include <limits.h>
> +#include <string.h>
> +
> +#include "config.h"
> +#include "tst_test.h"
> +#include "lapi/bpf.h"
> +
> +#define KEY_SZ 8
> +#define VAL_SZ 1024
> +
> +struct map_type {
> +	uint32_t id;
> +	char *name;
> +};
> +
> +static const struct map_type map_types[] = {
> +	{BPF_MAP_TYPE_HASH, "hash"},
> +	{BPF_MAP_TYPE_ARRAY, "array"}
> +};
> +
> +static void *key;
> +static void *val0;
> +static void *val1;
> +
> +static void setup(void)
> +{
> +	key = SAFE_MALLOC(KEY_SZ);
> +	memset(key, 0, (size_t) KEY_SZ);
> +	val0 = SAFE_MALLOC(VAL_SZ);
> +	val1 = SAFE_MALLOC(VAL_SZ);
> +	memset(val1, 0, (size_t) VAL_SZ);
> +}
> +
> +void run(unsigned int n)
> +{
> +	int fd, i;
> +	union bpf_attr attr;
> +	memset(&attr, 0, sizeof(attr));
> +	attr.map_type = map_types[n].id;
> +	attr.key_size = n == 0 ? KEY_SZ : 4;
Out of curiosity, why 4? The whole test works for KEY_SZ >= 1 (but the second
test needs it to be 4).

> +	attr.value_size = VAL_SZ;
> +	attr.max_entries = 1;
Also, we usually use struct test_case + a description at the test start, but I
guess it's ok like this.

> +
> +	if ((fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr))) == -1) {
> +		tst_brk(TFAIL | TERRNO, "Failed to create %s map",
> +			map_types[n].name);
> +	} else {
> +		tst_res(TPASS, "Created %s map", map_types[n].name);
> +	}
> +
> +	if (n == 0)
> +		memcpy(key, "12345678", KEY_SZ);
> +	else
> +		memset(key, 0, 4);
> +
> +	memset(&attr, 0, sizeof(attr));
> +	attr.map_fd = fd;
> +	attr.key = ptr_to_u64(key);
> +	attr.value = ptr_to_u64(val1);
> +
> +	TEST(bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
> +	if (n == 0) {
> +		if (TST_RET != -1 || TST_ERR != ENOENT) {
> +			tst_res(TFAIL | TTERRNO,
> +				"Empty hash map lookup should fail with ENOENT");
> +		} else {
> +			tst_res(TPASS | TTERRNO, "Empty hash map lookup");
> +		}
> +	} else if (TST_RET != -1) {
> +		for (i = 0;;) {
> +			if (*(char *) val1 != 0) {
> +				tst_res(TFAIL,
> +					"Preallocated array map val not zero");
I guess a break is missing here.

> +			} else if (++i >= VAL_SZ) {
> +				tst_res(TPASS,
> +					"Preallocated array map lookup");
> +				break;
> +			}
> +		}
> +	} else {
> +		tst_res(TFAIL | TERRNO, "Prellocated array map lookup");
> +	}
...

Kind regards,
Petr


* [LTP] [PATCH 1/2] Essential headers for BPF map creation
  2019-07-24  8:03 ` [LTP] [PATCH 1/2] Essential headers for BPF map creation Richard Palethorpe
@ 2019-07-24  9:27   ` Petr Vorel
  2019-07-24  9:55     ` Richard Palethorpe
  0 siblings, 1 reply; 14+ messages in thread
From: Petr Vorel @ 2019-07-24  9:27 UTC (permalink / raw)
  To: ltp

Hi Richie,

> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
Reviewed-by: Petr Vorel <pvorel@suse.cz>

> ---
>  include/lapi/bpf.h               | 242 +++++++++++++++++++++++++++++++
>  include/lapi/syscalls/aarch64.in |   1 +
>  include/lapi/syscalls/i386.in    |   1 +
>  include/lapi/syscalls/s390.in    |   1 +
>  include/lapi/syscalls/sparc.in   |   1 +
>  include/lapi/syscalls/x86_64.in  |   1 +
>  6 files changed, 247 insertions(+)
>  create mode 100644 include/lapi/bpf.h

> diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h
> new file mode 100644
> index 000000000..369de0175
> --- /dev/null
> +++ b/include/lapi/bpf.h
> @@ -0,0 +1,242 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
> + *
> + * Essential Extended Berkeley Packet Filter (eBPF) headers
> + *
> + * Mostly copied/adapted from linux/bpf.h and libbpf so that we can perform
> + * some eBPF testing without any external dependencies.
Probably the only sane way, but it will be uncomfortable to keep the header
updated. A simple copy of include/uapi/linux/bpf.h would be easier (though we'd
then require kernel headers anyway: <linux/types.h> and <linux/bpf_common.h>).
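
For reference, that alternative would look roughly like the sketch below
(assuming new enough installed headers) instead of the local copy:

	/* Sketch only: rely on the distribution's kernel headers rather
	 * than a copied lapi/bpf.h. <linux/bpf.h> pulls in <linux/types.h>
	 * and <linux/bpf_common.h>. */
	#include <linux/bpf.h>
	#include "lapi/syscalls.h"

	static inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
	{
		return tst_syscall(__NR_bpf, cmd, attr, size);
	}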

Kind regards,
Petr


* [LTP] [PATCH 0/2] [RFC] BPF testing
  2019-07-24  8:03 [LTP] [PATCH 0/2] [RFC] BPF testing Richard Palethorpe
  2019-07-24  8:03 ` [LTP] [PATCH 1/2] Essential headers for BPF map creation Richard Palethorpe
  2019-07-24  8:03 ` [LTP] [PATCH 2/2] BPF: Sanity check creating and updating maps Richard Palethorpe
@ 2019-07-24  9:30 ` Petr Vorel
  2019-07-25 14:23 ` Cyril Hrubis
  2019-07-30 13:44 ` [LTP] [PATCH v2 1/4] BPF: Essential headers for map creation Richard Palethorpe
  4 siblings, 0 replies; 14+ messages in thread
From: Petr Vorel @ 2019-07-24  9:30 UTC (permalink / raw)
  To: ltp

Hi Richie,

> Hello,

> This patch set introduces a very basic test which kicks the tires of the bpf
> system call. It doesn't actually load a eBPF program, I will create another
> test for that. However I have some concerns which I will discuss while doing
> that.
Good start, great.

> There are already extensive BPF tests in the kernel selftests. These appear to
> be quite complex and test a variety of functionality. They also are far less
> structured than LTP's modern tests and are tied to the kernel tree which makes
> using them in QA a pain. There are also some tests in the BCC project, which
> may test the kernel as a byproduct.
Yep, this is true for other tests in the kselftest tree as well.

> So there are a number of options which are not necessarily mutually exclusive:

> 1) Port (some of) the selftests to the LTP.
> 2) Port the LTP library to the selftests.
> 3) Focus the LTP's BPF tests on reproducing specific high impact bugs.

> This patch set copies in the necessary headers so that we have zero external
> dependencies.

> I will also use raw byte code for the program test which is at
> least acceptable for trivial programs. So we do not need BCC or Clang/LLVM
> with eBPF support or matching kernel sources to generate offsets into internal
> structures.
+1

> For the time being atleast my preference would be for (3) while avoiding
> taking on any dependencies to ensure those tests are run by users mostly
> ignorant of BPF, but are still exposed to critical bugs in the BPF stack.
+1

Kind regards,
Petr


* [LTP] [PATCH 1/2] Essential headers for BPF map creation
  2019-07-24  9:27   ` Petr Vorel
@ 2019-07-24  9:55     ` Richard Palethorpe
  0 siblings, 0 replies; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-24  9:55 UTC (permalink / raw)
  To: ltp

Hello,

Petr Vorel <pvorel@suse.cz> writes:

> Hi Richie,
>
>> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
> Reviewed-by: Petr Vorel <pvorel@suse.cz>
>
>> ---
>>  include/lapi/bpf.h               | 242 +++++++++++++++++++++++++++++++
>>  include/lapi/syscalls/aarch64.in |   1 +
>>  include/lapi/syscalls/i386.in    |   1 +
>>  include/lapi/syscalls/s390.in    |   1 +
>>  include/lapi/syscalls/sparc.in   |   1 +
>>  include/lapi/syscalls/x86_64.in  |   1 +
>>  6 files changed, 247 insertions(+)
>>  create mode 100644 include/lapi/bpf.h
>
>> diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h
>> new file mode 100644
>> index 000000000..369de0175
>> --- /dev/null
>> +++ b/include/lapi/bpf.h
>> @@ -0,0 +1,242 @@
>> +// SPDX-License-Identifier: GPL-2.0-or-later
>> +/*
>> + * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
>> + *
>> + * Essential Extended Berkeley Packet Filter (eBPF) headers
>> + *
>> + * Mostly copied/adapted from linux/bpf.h and libbpf so that we can perform
>> + * some eBPF testing without any external dependencies.
> Probably the only sane way. But it will be uncomfortable to keep the header
> updated. Simple copy of include/uapi/linux/bpf.h would be easier
> (but we'd require to have kernel headers anyway (<linux/types.h> and
> <linux/bpf_common.h>).

It hasn't been a problem in the past; we just need to add the missing parts
when adding a new test.

--
Thank you,
Richard.


* [LTP] [PATCH 2/2] BPF: Sanity check creating and updating maps
  2019-07-24  9:18   ` Petr Vorel
@ 2019-07-24 12:15     ` Richard Palethorpe
  0 siblings, 0 replies; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-24 12:15 UTC (permalink / raw)
  To: ltp


Petr Vorel <pvorel@suse.cz> writes:

> Hi Richie,
>
>> Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
> Reviewed-by: Petr Vorel <pvorel@suse.cz>
>
> LGTM with minor comments.
>
> ...
>> diff --git a/testcases/kernel/syscalls/bpf/bpf_map01.c b/testcases/kernel/syscalls/bpf/bpf_map01.c
> ...
>> +#include <limits.h>
>> +#include <string.h>
>> +
>> +#include "config.h"
>> +#include "tst_test.h"
>> +#include "lapi/bpf.h"
>> +
>> +#define KEY_SZ 8
>> +#define VAL_SZ 1024
>> +
>> +struct map_type {
>> +	uint32_t id;
>> +	char *name;
>> +};
>> +
>> +static const struct map_type map_types[] = {
>> +	{BPF_MAP_TYPE_HASH, "hash"},
>> +	{BPF_MAP_TYPE_ARRAY, "array"}
>> +};
>> +
>> +static void *key;
>> +static void *val0;
>> +static void *val1;
>> +
>> +static void setup(void)
>> +{
>> +	key = SAFE_MALLOC(KEY_SZ);
>> +	memset(key, 0, (size_t) KEY_SZ);
>> +	val0 = SAFE_MALLOC(VAL_SZ);
>> +	val1 = SAFE_MALLOC(VAL_SZ);
>> +	memset(val1, 0, (size_t) VAL_SZ);
>> +}
>> +
>> +void run(unsigned int n)
>> +{
>> +	int fd, i;
>> +	union bpf_attr attr;
>> +	memset(&attr, 0, sizeof(attr));
>> +	attr.map_type = map_types[n].id;
>> +	attr.key_size = n == 0 ? KEY_SZ : 4;
> Out of curiosity why 4? As whole test is working for KEY_SZ >= 1 (but for second
> test it's needed to be 4).

The second test is for the array map type, which only supports 32-bit integer
indexes.
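
To illustrate (a sketch, not taken from the patch): the kernel rejects array
maps whose key is not a u32 index, while hash maps treat the key as an opaque
blob:

	/* Array maps must have key_size == sizeof(uint32_t);
	 * anything else is rejected with EINVAL. */
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;

	/* Hash maps take an opaque key, so KEY_SZ (8) is fine there. */
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = KEY_SZ;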

-- 
Thank you,
Richard.


* [LTP] [PATCH 0/2] [RFC] BPF testing
  2019-07-24  8:03 [LTP] [PATCH 0/2] [RFC] BPF testing Richard Palethorpe
                   ` (2 preceding siblings ...)
  2019-07-24  9:30 ` [LTP] [PATCH 0/2] [RFC] BPF testing Petr Vorel
@ 2019-07-25 14:23 ` Cyril Hrubis
  2019-07-29 10:02   ` Richard Palethorpe
  2019-07-30 13:44 ` [LTP] [PATCH v2 1/4] BPF: Essential headers for map creation Richard Palethorpe
  4 siblings, 1 reply; 14+ messages in thread
From: Cyril Hrubis @ 2019-07-25 14:23 UTC (permalink / raw)
  To: ltp

Hi!
> This patch set introduces a very basic test which kicks the tires of the bpf
> system call. It doesn't actually load a eBPF program, I will create another
> test for that. However I have some concerns which I will discuss while doing
> that.
> 
> There are already extensive BPF tests in the kernel selftests. These appear to
> be quite complex and test a variety of functionality. They also are far less
> structured than LTP's modern tests and are tied to the kernel tree which makes
> using them in QA a pain. There are also some tests in the BCC project, which
> may test the kernel as a byproduct.
> 
> So there are a number of options which are not necessarily mutually exclusive:
> 
> 1) Port (some of) the selftests to the LTP.
> 2) Port the LTP library to the selftests.
> 3) Focus the LTP's BPF tests on reproducing specific high impact bugs.

Option 3 sounds good. Just FYI, there are CVEs for BPF, some with PoCs; just by
googling "ebpf CVE" you get some:

https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-16995
https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-7308

Also, the Cloudflare blog seems to be very relevant:

https://blog.cloudflare.com/ebpf-cant-count/

And there are some tests stuffed in linux/samples/bpf/ as well.

-- 
Cyril Hrubis
chrubis@suse.cz


* [LTP] [PATCH 0/2] [RFC] BPF testing
  2019-07-25 14:23 ` Cyril Hrubis
@ 2019-07-29 10:02   ` Richard Palethorpe
  0 siblings, 0 replies; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-29 10:02 UTC (permalink / raw)
  To: ltp

Hello,

Cyril Hrubis <chrubis@suse.cz> writes:

> Hi!
>> This patch set introduces a very basic test which kicks the tires of the bpf
>> system call. It doesn't actually load a eBPF program, I will create another
>> test for that. However I have some concerns which I will discuss while doing
>> that.
>>
>> There are already extensive BPF tests in the kernel selftests. These appear to
>> be quite complex and test a variety of functionality. They also are far less
>> structured than LTP's modern tests and are tied to the kernel tree which makes
>> using them in QA a pain. There are also some tests in the BCC project, which
>> may test the kernel as a byproduct.
>>
>> So there are a number of options which are not necessarily mutually exclusive:
>>
>> 1) Port (some of) the selftests to the LTP.
>> 2) Port the LTP library to the selftests.
>> 3) Focus the LTP's BPF tests on reproducing specific high impact bugs.
>
> The option 3 sounds good, just FYI there are CVEs some with POCs for BPF,
> just by googling "ebpf CVE" you got some:
>
> https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-16995
> https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-7308
>
> Also cloudfare blog seems to be very relevant:
>
> https://blog.cloudflare.com/ebpf-cant-count/
>
> And there are some test stuffed in linux/samples/bpf/ as well.

Ah, something I didn't realise, because the man page is out of date, is that
various types of eBPF program can be run without CAP_SYS_ADMIN. I will send a
patch for that.
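
For reference (not something this series relies on): whether unprivileged
loading is allowed is also gated by the kernel.unprivileged_bpf_disabled
sysctl, so a test could probe it with something roughly like this sketch:

	/* Rough sketch, not part of the posted patches: skip when the
	 * sysctl forbids unprivileged BPF. SAFE_FILE_SCANF() is the usual
	 * LTP helper; the knob may be absent on old kernels. */
	int bpf_disabled = 0;

	if (access("/proc/sys/kernel/unprivileged_bpf_disabled", F_OK) == 0)
		SAFE_FILE_SCANF("/proc/sys/kernel/unprivileged_bpf_disabled",
				"%d", &bpf_disabled);

	if (bpf_disabled)
		tst_brk(TCONF, "unprivileged BPF is disabled on this system");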

--
Thank you,
Richard.


* [LTP] [PATCH v2 1/4] BPF: Essential headers for map creation
  2019-07-24  8:03 [LTP] [PATCH 0/2] [RFC] BPF testing Richard Palethorpe
                   ` (3 preceding siblings ...)
  2019-07-25 14:23 ` Cyril Hrubis
@ 2019-07-30 13:44 ` Richard Palethorpe
  2019-07-30 13:44   ` [LTP] [PATCH v2 2/4] BPF: Sanity check creating and updating maps Richard Palethorpe
                     ` (2 more replies)
  4 siblings, 3 replies; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-30 13:44 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---

V2:
* Added bpf_prog01 test
* Removed the root check; instead signal TCONF if bpf() fails with EPERM
* Added more headers, including those needed for byte code and for old distros

 include/lapi/bpf.h               | 242 +++++++++++++++++++++++++++++++
 include/lapi/syscalls/aarch64.in |   1 +
 include/lapi/syscalls/i386.in    |   1 +
 include/lapi/syscalls/s390.in    |   1 +
 include/lapi/syscalls/sparc.in   |   1 +
 include/lapi/syscalls/x86_64.in  |   1 +
 6 files changed, 247 insertions(+)
 create mode 100644 include/lapi/bpf.h

diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h
new file mode 100644
index 000000000..369de0175
--- /dev/null
+++ b/include/lapi/bpf.h
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
+ *
+ * Essential Extended Berkeley Packet Filter (eBPF) headers
+ *
+ * Mostly copied/adapted from linux/bpf.h and libbpf so that we can perform
+ * some eBPF testing without any external dependencies.
+ */
+
+#ifndef BPF_H
+# define BPF_H
+
+#include <stdint.h>
+
+#include "lapi/syscalls.h"
+
+/* Start copy from linux/bpf.h */
+enum bpf_cmd {
+	BPF_MAP_CREATE,
+	BPF_MAP_LOOKUP_ELEM,
+	BPF_MAP_UPDATE_ELEM,
+	BPF_MAP_DELETE_ELEM,
+	BPF_MAP_GET_NEXT_KEY,
+	BPF_PROG_LOAD,
+	BPF_OBJ_PIN,
+	BPF_OBJ_GET,
+	BPF_PROG_ATTACH,
+	BPF_PROG_DETACH,
+	BPF_PROG_TEST_RUN,
+	BPF_PROG_GET_NEXT_ID,
+	BPF_MAP_GET_NEXT_ID,
+	BPF_PROG_GET_FD_BY_ID,
+	BPF_MAP_GET_FD_BY_ID,
+	BPF_OBJ_GET_INFO_BY_FD,
+	BPF_PROG_QUERY,
+	BPF_RAW_TRACEPOINT_OPEN,
+	BPF_BTF_LOAD,
+	BPF_BTF_GET_FD_BY_ID,
+	BPF_TASK_FD_QUERY,
+	BPF_MAP_LOOKUP_AND_DELETE_ELEM,
+	BPF_MAP_FREEZE,
+};
+
+enum bpf_map_type {
+	BPF_MAP_TYPE_UNSPEC,
+	BPF_MAP_TYPE_HASH,
+	BPF_MAP_TYPE_ARRAY,
+	BPF_MAP_TYPE_PROG_ARRAY,
+	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	BPF_MAP_TYPE_PERCPU_HASH,
+	BPF_MAP_TYPE_PERCPU_ARRAY,
+	BPF_MAP_TYPE_STACK_TRACE,
+	BPF_MAP_TYPE_CGROUP_ARRAY,
+	BPF_MAP_TYPE_LRU_HASH,
+	BPF_MAP_TYPE_LRU_PERCPU_HASH,
+	BPF_MAP_TYPE_LPM_TRIE,
+	BPF_MAP_TYPE_ARRAY_OF_MAPS,
+	BPF_MAP_TYPE_HASH_OF_MAPS,
+	BPF_MAP_TYPE_DEVMAP,
+	BPF_MAP_TYPE_SOCKMAP,
+	BPF_MAP_TYPE_CPUMAP,
+	BPF_MAP_TYPE_XSKMAP,
+	BPF_MAP_TYPE_SOCKHASH,
+	BPF_MAP_TYPE_CGROUP_STORAGE,
+	BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+	BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+	BPF_MAP_TYPE_QUEUE,
+	BPF_MAP_TYPE_STACK,
+	BPF_MAP_TYPE_SK_STORAGE,
+};
+
+#define BPF_OBJ_NAME_LEN 16U
+
+#define BPF_ANY		0 /* create new element or update existing */
+#define BPF_NOEXIST	1 /* create new element if it didn't exist */
+#define BPF_EXIST	2 /* update existing element */
+#define BPF_F_LOCK	4 /* spin_lock-ed map_lookup/map_update */
+
+#define aligned_uint64_t uint64_t __attribute__((aligned(8)))
+
+union bpf_attr {
+	struct { /* anonymous struct used by BPF_MAP_CREATE command */
+		uint32_t	map_type;	/* one of enum bpf_map_type */
+		uint32_t	key_size;	/* size of key in bytes */
+		uint32_t	value_size;	/* size of value in bytes */
+		uint32_t	max_entries;	/* max number of entries in a map */
+		uint32_t	map_flags;	/* BPF_MAP_CREATE related
+					 * flags defined above.
+					 */
+		uint32_t	inner_map_fd;	/* fd pointing to the inner map */
+		uint32_t	numa_node;	/* numa node (effective only if
+					 * BPF_F_NUMA_NODE is set).
+					 */
+		char	map_name[BPF_OBJ_NAME_LEN];
+		uint32_t	map_ifindex;	/* ifindex of netdev to create on */
+		uint32_t	btf_fd;		/* fd pointing to a BTF type data */
+		uint32_t	btf_key_type_id;	/* BTF type_id of the key */
+		uint32_t	btf_value_type_id;	/* BTF type_id of the value */
+	};
+
+	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
+		uint32_t		map_fd;
+		aligned_uint64_t	key;
+		union {
+			aligned_uint64_t value;
+			aligned_uint64_t next_key;
+		};
+		uint64_t		flags;
+	};
+
+	struct { /* anonymous struct used by BPF_PROG_LOAD command */
+		uint32_t		prog_type;	/* one of enum bpf_prog_type */
+		uint32_t		insn_cnt;
+		aligned_uint64_t	insns;
+		aligned_uint64_t	license;
+		uint32_t		log_level;	/* verbosity level of verifier */
+		uint32_t		log_size;	/* size of user buffer */
+		aligned_uint64_t	log_buf;	/* user supplied buffer */
+		uint32_t		kern_version;	/* not used */
+		uint32_t		prog_flags;
+		char		prog_name[BPF_OBJ_NAME_LEN];
+		uint32_t		prog_ifindex;	/* ifindex of netdev to prep for */
+		/* For some prog types expected attach type must be known at
+		 * load time to verify attach type specific parts of prog
+		 * (context accesses, allowed helpers, etc).
+		 */
+		uint32_t		expected_attach_type;
+		uint32_t		prog_btf_fd;	/* fd pointing to BTF type data */
+		uint32_t		func_info_rec_size;	/* userspace bpf_func_info size */
+		aligned_uint64_t	func_info;	/* func info */
+		uint32_t		func_info_cnt;	/* number of bpf_func_info records */
+		uint32_t		line_info_rec_size;	/* userspace bpf_line_info size */
+		aligned_uint64_t	line_info;	/* line info */
+		uint32_t		line_info_cnt;	/* number of bpf_line_info records */
+	};
+
+	struct { /* anonymous struct used by BPF_OBJ_* commands */
+		aligned_uint64_t	pathname;
+		uint32_t		bpf_fd;
+		uint32_t		file_flags;
+	};
+
+	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+		uint32_t		target_fd;	/* container object to attach to */
+		uint32_t		attach_bpf_fd;	/* eBPF program to attach */
+		uint32_t		attach_type;
+		uint32_t		attach_flags;
+	};
+
+	struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
+		uint32_t		prog_fd;
+		uint32_t		retval;
+		uint32_t		data_size_in;	/* input: len of data_in */
+		uint32_t		data_size_out;	/* input/output: len of data_out
+						 *   returns ENOSPC if data_out
+						 *   is too small.
+						 */
+		aligned_uint64_t	data_in;
+		aligned_uint64_t	data_out;
+		uint32_t		repeat;
+		uint32_t		duration;
+		uint32_t		ctx_size_in;	/* input: len of ctx_in */
+		uint32_t		ctx_size_out;	/* input/output: len of ctx_out
+						 *   returns ENOSPC if ctx_out
+						 *   is too small.
+						 */
+		aligned_uint64_t	ctx_in;
+		aligned_uint64_t	ctx_out;
+	} test;
+
+	struct { /* anonymous struct used by BPF_*_GET_*_ID */
+		union {
+			uint32_t		start_id;
+			uint32_t		prog_id;
+			uint32_t		map_id;
+			uint32_t		btf_id;
+		};
+		uint32_t		next_id;
+		uint32_t		open_flags;
+	};
+
+	struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
+		uint32_t		bpf_fd;
+		uint32_t		info_len;
+		aligned_uint64_t	info;
+	} info;
+
+	struct { /* anonymous struct used by BPF_PROG_QUERY command */
+		uint32_t		target_fd;	/* container object to query */
+		uint32_t		attach_type;
+		uint32_t		query_flags;
+		uint32_t		attach_flags;
+		aligned_uint64_t	prog_ids;
+		uint32_t		prog_cnt;
+	} query;
+
+	struct {
+		uint64_t name;
+		uint32_t prog_fd;
+	} raw_tracepoint;
+
+	struct { /* anonymous struct for BPF_BTF_LOAD */
+		aligned_uint64_t	btf;
+		aligned_uint64_t	btf_log_buf;
+		uint32_t		btf_size;
+		uint32_t		btf_log_size;
+		uint32_t		btf_log_level;
+	};
+
+	struct {
+		uint32_t		pid;		/* input: pid */
+		uint32_t		fd;		/* input: fd */
+		uint32_t		flags;		/* input: flags */
+		uint32_t		buf_len;	/* input/output: buf len */
+		aligned_uint64_t	buf;		/* input/output:
+						 *   tp_name for tracepoint
+						 *   symbol for kprobe
+						 *   filename for uprobe
+						 */
+		uint32_t		prog_id;	/* output: prod_id */
+		uint32_t		fd_type;	/* output: BPF_FD_TYPE_* */
+		uint64_t		probe_offset;	/* output: probe_offset */
+		uint64_t		probe_addr;	/* output: probe_addr */
+	} task_fd_query;
+} __attribute__((aligned(8)));
+
+/* End copy from linux/bpf.h */
+
+/* Start copy from tools/lib/bpf  */
+inline uint64_t ptr_to_u64(const void *ptr)
+{
+	return (uint64_t) (unsigned long) ptr;
+}
+
+inline int bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
+{
+	return tst_syscall(__NR_bpf, cmd, attr, size);
+}
+/* End copy from tools/lib/bpf */
+
+#endif	/* BPF_H */
diff --git a/include/lapi/syscalls/aarch64.in b/include/lapi/syscalls/aarch64.in
index 7db6e281c..0e00641bc 100644
--- a/include/lapi/syscalls/aarch64.in
+++ b/include/lapi/syscalls/aarch64.in
@@ -258,6 +258,7 @@ process_vm_writev 271
 kcmp 272
 getrandom 278
 memfd_create 279
+bpf 280
 userfaultfd 282
 membarrier 283
 execveat 281
diff --git a/include/lapi/syscalls/i386.in b/include/lapi/syscalls/i386.in
index 02f3955ba..87ab46933 100644
--- a/include/lapi/syscalls/i386.in
+++ b/include/lapi/syscalls/i386.in
@@ -340,6 +340,7 @@ sched_getattr 352
 renameat2 354
 getrandom 355
 memfd_create 356
+bpf 357
 execveat 358
 userfaultfd 374
 membarrier 375
diff --git a/include/lapi/syscalls/s390.in b/include/lapi/syscalls/s390.in
index c304ef4b7..d3f7eb1f6 100644
--- a/include/lapi/syscalls/s390.in
+++ b/include/lapi/syscalls/s390.in
@@ -331,6 +331,7 @@ sched_getattr 346
 renameat2 347
 getrandom 349
 memfd_create 350
+bpf 351
 userfaultfd 355
 membarrier 356
 execveat 354
diff --git a/include/lapi/syscalls/sparc.in b/include/lapi/syscalls/sparc.in
index ab7204663..94a672428 100644
--- a/include/lapi/syscalls/sparc.in
+++ b/include/lapi/syscalls/sparc.in
@@ -336,6 +336,7 @@ kcmp 341
 renameat2 345
 getrandom 347
 memfd_create 348
+bpf 349
 membarrier 351
 userfaultfd 352
 execveat 350
diff --git a/include/lapi/syscalls/x86_64.in b/include/lapi/syscalls/x86_64.in
index fdb414c10..b1cbd4f2f 100644
--- a/include/lapi/syscalls/x86_64.in
+++ b/include/lapi/syscalls/x86_64.in
@@ -307,6 +307,7 @@ sched_getattr 315
 renameat2 316
 getrandom 318
 memfd_create 319
+bpf 321
 execveat 322
 userfaultfd 323
 membarrier 324
-- 
2.22.0



* [LTP] [PATCH v2 2/4] BPF: Sanity check creating and updating maps
  2019-07-30 13:44 ` [LTP] [PATCH v2 1/4] BPF: Essential headers for map creation Richard Palethorpe
@ 2019-07-30 13:44   ` Richard Palethorpe
  2019-07-30 13:44   ` [LTP] [PATCH v2 3/4] BPF: Essential headers for a basic program Richard Palethorpe
  2019-07-30 13:44   ` [LTP] [PATCH v2 4/4] BPF: Sanity check creating a program Richard Palethorpe
  2 siblings, 0 replies; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-30 13:44 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 runtest/syscalls                          |   2 +
 testcases/kernel/syscalls/bpf/.gitignore  |   1 +
 testcases/kernel/syscalls/bpf/Makefile    |  10 ++
 testcases/kernel/syscalls/bpf/bpf_map01.c | 143 ++++++++++++++++++++++
 4 files changed, 156 insertions(+)
 create mode 100644 testcases/kernel/syscalls/bpf/.gitignore
 create mode 100644 testcases/kernel/syscalls/bpf/Makefile
 create mode 100644 testcases/kernel/syscalls/bpf/bpf_map01.c

diff --git a/runtest/syscalls b/runtest/syscalls
index 67dfed661..46880ee1d 100644
--- a/runtest/syscalls
+++ b/runtest/syscalls
@@ -32,6 +32,8 @@ bind01 bind01
 bind02 bind02
 bind03 bind03
 
+bpf_map01 bpf_map01
+
 brk01 brk01
 
 capget01 capget01
diff --git a/testcases/kernel/syscalls/bpf/.gitignore b/testcases/kernel/syscalls/bpf/.gitignore
new file mode 100644
index 000000000..f33532484
--- /dev/null
+++ b/testcases/kernel/syscalls/bpf/.gitignore
@@ -0,0 +1 @@
+bpf_map01
diff --git a/testcases/kernel/syscalls/bpf/Makefile b/testcases/kernel/syscalls/bpf/Makefile
new file mode 100644
index 000000000..990c8c83c
--- /dev/null
+++ b/testcases/kernel/syscalls/bpf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2019 Linux Test Project
+
+top_srcdir		?= ../../../..
+
+include $(top_srcdir)/include/mk/testcases.mk
+
+CFLAGS			+= -D_GNU_SOURCE
+
+include $(top_srcdir)/include/mk/generic_leaf_target.mk
diff --git a/testcases/kernel/syscalls/bpf/bpf_map01.c b/testcases/kernel/syscalls/bpf/bpf_map01.c
new file mode 100644
index 000000000..fd0ec2c7c
--- /dev/null
+++ b/testcases/kernel/syscalls/bpf/bpf_map01.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
+ *
+ * Trivial Extended Berkeley Packet Filter (eBPF) test.
+ *
+ * Sanity check creating and updating maps.
+ */
+
+#include <limits.h>
+#include <string.h>
+
+#include "config.h"
+#include "tst_test.h"
+#include "lapi/bpf.h"
+
+#define KEY_SZ 8
+#define VAL_SZ 1024
+
+struct map_type {
+	uint32_t id;
+	char *name;
+};
+
+static const struct map_type map_types[] = {
+	{BPF_MAP_TYPE_HASH, "hash"},
+	{BPF_MAP_TYPE_ARRAY, "array"}
+};
+
+static void *key;
+static void *val0;
+static void *val1;
+
+static void setup(void)
+{
+	key = SAFE_MALLOC(KEY_SZ);
+	memset(key, 0, (size_t) KEY_SZ);
+	val0 = SAFE_MALLOC(VAL_SZ);
+	val1 = SAFE_MALLOC(VAL_SZ);
+	memset(val1, 0, (size_t) VAL_SZ);
+}
+
+void run(unsigned int n)
+{
+	int fd, i;
+	union bpf_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_type = map_types[n].id;
+	attr.key_size = n == 0 ? KEY_SZ : 4;
+	attr.value_size = VAL_SZ;
+	attr.max_entries = 1;
+
+	TEST(bpf(BPF_MAP_CREATE, &attr, sizeof(attr)));
+	if (TST_RET == -1) {
+		if (TST_ERR == EPERM) {
+			tst_brk(TCONF | TTERRNO,
+				"bpf() requires CAP_SYS_ADMIN on this system");
+		} else {
+			tst_brk(TFAIL | TTERRNO, "Failed to create %s map",
+				map_types[n].name);
+		}
+	}
+	tst_res(TPASS, "Created %s map", map_types[n].name);
+	fd = TST_RET;
+
+	if (n == 0)
+		memcpy(key, "12345678", KEY_SZ);
+	else
+		memset(key, 0, 4);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_fd = fd;
+	attr.key = ptr_to_u64(key);
+	attr.value = ptr_to_u64(val1);
+
+	TEST(bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
+	if (n == 0) {
+		if (TST_RET != -1 || TST_ERR != ENOENT) {
+			tst_res(TFAIL | TTERRNO,
+				"Empty hash map lookup should fail with ENOENT");
+		} else {
+			tst_res(TPASS | TTERRNO, "Empty hash map lookup");
+		}
+	} else if (TST_RET != -1) {
+		for (i = 0;;) {
+			if (*(char *) val1 != 0) {
+				tst_res(TFAIL,
+					"Preallocated array map val not zero");
+			} else if (++i >= VAL_SZ) {
+				tst_res(TPASS,
+					"Preallocated array map lookup");
+				break;
+			}
+		}
+	} else {
+		tst_res(TFAIL | TERRNO, "Preallocated array map lookup");
+	}
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_fd = fd;
+	attr.key = ptr_to_u64(key);
+	attr.value = ptr_to_u64(val0);
+	attr.flags = BPF_ANY;
+
+	TEST(bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)));
+	if (TST_RET == -1) {
+		tst_brk(TFAIL | TTERRNO,
+			"Update %s map element",
+			map_types[n].name);
+	} else {
+		tst_res(TPASS,
+			"Update %s map element",
+			map_types[n].name);
+	}
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_fd = fd;
+	attr.key = ptr_to_u64(key);
+	attr.value = ptr_to_u64(val1);
+
+	TEST(bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
+	if (TST_RET == -1) {
+		tst_res(TFAIL | TTERRNO,
+			"%s map lookup missing",
+			map_types[n].name);
+	} else if (memcmp(val0, val1, (size_t) VAL_SZ)) {
+		tst_res(TFAIL,
+			"%s map lookup returned different value",
+			map_types[n].name);
+	} else {
+		tst_res(TPASS, "%s map lookup", map_types[n].name);
+	}
+
+	SAFE_CLOSE(fd);
+}
+
+static struct tst_test test = {
+	.tcnt = 2,
+	.setup = setup,
+	.test = run,
+	.min_kver = "3.18",
+};
-- 
2.22.0



* [LTP] [PATCH v2 3/4] BPF: Essential headers for a basic program
  2019-07-30 13:44 ` [LTP] [PATCH v2 1/4] BPF: Essential headers for map creation Richard Palethorpe
  2019-07-30 13:44   ` [LTP] [PATCH v2 2/4] BPF: Sanity check creating and updating maps Richard Palethorpe
@ 2019-07-30 13:44   ` Richard Palethorpe
  2019-07-30 13:44   ` [LTP] [PATCH v2 4/4] BPF: Sanity check creating a program Richard Palethorpe
  2 siblings, 0 replies; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-30 13:44 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 include/lapi/bpf.h    | 286 +++++++++++++++++++++++++++++++++++++++++-
 include/lapi/socket.h |   4 +
 2 files changed, 289 insertions(+), 1 deletion(-)

diff --git a/include/lapi/bpf.h b/include/lapi/bpf.h
index 369de0175..122eb5469 100644
--- a/include/lapi/bpf.h
+++ b/include/lapi/bpf.h
@@ -15,7 +15,57 @@
 
 #include "lapi/syscalls.h"
 
-/* Start copy from linux/bpf.h */
+/* Start copy from linux/bpf_(common).h */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define		BPF_LD		0x00
+#define		BPF_ST		0x02
+#define		BPF_JMP		0x05
+
+#define BPF_SIZE(code)  ((code) & 0x18)
+#define         BPF_DW		0x18	/* double word (64-bit) */
+
+#define BPF_MODE(code)  ((code) & 0xe0)
+#define		BPF_IMM		0x00
+#define		BPF_MEM		0x60
+
+#define BPF_OP(code)    ((code) & 0xf0)
+#define		BPF_ADD		0x00
+
+#define		BPF_JEQ		0x10
+
+#define BPF_SRC(code)   ((code) & 0x08)
+#define		BPF_K		0x00
+#define		BPF_X		0x08
+
+#define BPF_ALU64	0x07	/* alu mode in double word width */
+#define BPF_MOV		0xb0	/* mov reg to reg */
+#define BPF_CALL	0x80	/* function call */
+#define BPF_EXIT	0x90	/* function return */
+
+/* Register numbers */
+enum {
+	BPF_REG_0 = 0,
+	BPF_REG_1,
+	BPF_REG_2,
+	BPF_REG_3,
+	BPF_REG_4,
+	BPF_REG_5,
+	BPF_REG_6,
+	BPF_REG_7,
+	BPF_REG_8,
+	BPF_REG_9,
+	BPF_REG_10,
+	MAX_BPF_REG,
+};
+
+struct bpf_insn {
+	uint8_t	code;		/* opcode */
+	uint8_t	dst_reg:4;	/* dest register */
+	uint8_t	src_reg:4;	/* source register */
+	int16_t	off;		/* signed offset */
+	int32_t	imm;		/* signed immediate constant */
+};
+
 enum bpf_cmd {
 	BPF_MAP_CREATE,
 	BPF_MAP_LOOKUP_ELEM,
@@ -70,6 +120,37 @@ enum bpf_map_type {
 	BPF_MAP_TYPE_SK_STORAGE,
 };
 
+enum bpf_prog_type {
+	BPF_PROG_TYPE_UNSPEC,
+	BPF_PROG_TYPE_SOCKET_FILTER,
+	BPF_PROG_TYPE_KPROBE,
+	BPF_PROG_TYPE_SCHED_CLS,
+	BPF_PROG_TYPE_SCHED_ACT,
+	BPF_PROG_TYPE_TRACEPOINT,
+	BPF_PROG_TYPE_XDP,
+	BPF_PROG_TYPE_PERF_EVENT,
+	BPF_PROG_TYPE_CGROUP_SKB,
+	BPF_PROG_TYPE_CGROUP_SOCK,
+	BPF_PROG_TYPE_LWT_IN,
+	BPF_PROG_TYPE_LWT_OUT,
+	BPF_PROG_TYPE_LWT_XMIT,
+	BPF_PROG_TYPE_SOCK_OPS,
+	BPF_PROG_TYPE_SK_SKB,
+	BPF_PROG_TYPE_CGROUP_DEVICE,
+	BPF_PROG_TYPE_SK_MSG,
+	BPF_PROG_TYPE_RAW_TRACEPOINT,
+	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
+	BPF_PROG_TYPE_LWT_SEG6LOCAL,
+	BPF_PROG_TYPE_LIRC_MODE2,
+	BPF_PROG_TYPE_SK_REUSEPORT,
+	BPF_PROG_TYPE_FLOW_DISSECTOR,
+	BPF_PROG_TYPE_CGROUP_SYSCTL,
+	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+	BPF_PROG_TYPE_CGROUP_SOCKOPT,
+};
+
+#define BPF_PSEUDO_MAP_FD	1
+
 #define BPF_OBJ_NAME_LEN 16U
 
 #define BPF_ANY		0 /* create new element or update existing */
@@ -225,8 +306,211 @@ union bpf_attr {
 	} task_fd_query;
 } __attribute__((aligned(8)));
 
+#define __BPF_FUNC_MAPPER(FN)		\
+	FN(unspec),			\
+	FN(map_lookup_elem),		\
+	FN(map_update_elem),		\
+	FN(map_delete_elem),		\
+	FN(probe_read),			\
+	FN(ktime_get_ns),		\
+	FN(trace_printk),		\
+	FN(get_prandom_u32),		\
+	FN(get_smp_processor_id),	\
+	FN(skb_store_bytes),		\
+	FN(l3_csum_replace),		\
+	FN(l4_csum_replace),		\
+	FN(tail_call),			\
+	FN(clone_redirect),		\
+	FN(get_current_pid_tgid),	\
+	FN(get_current_uid_gid),	\
+	FN(get_current_comm),		\
+	FN(get_cgroup_classid),		\
+	FN(skb_vlan_push),		\
+	FN(skb_vlan_pop),		\
+	FN(skb_get_tunnel_key),		\
+	FN(skb_set_tunnel_key),		\
+	FN(perf_event_read),		\
+	FN(redirect),			\
+	FN(get_route_realm),		\
+	FN(perf_event_output),		\
+	FN(skb_load_bytes),		\
+	FN(get_stackid),		\
+	FN(csum_diff),			\
+	FN(skb_get_tunnel_opt),		\
+	FN(skb_set_tunnel_opt),		\
+	FN(skb_change_proto),		\
+	FN(skb_change_type),		\
+	FN(skb_under_cgroup),		\
+	FN(get_hash_recalc),		\
+	FN(get_current_task),		\
+	FN(probe_write_user),		\
+	FN(current_task_under_cgroup),	\
+	FN(skb_change_tail),		\
+	FN(skb_pull_data),		\
+	FN(csum_update),		\
+	FN(set_hash_invalid),		\
+	FN(get_numa_node_id),		\
+	FN(skb_change_head),		\
+	FN(xdp_adjust_head),		\
+	FN(probe_read_str),		\
+	FN(get_socket_cookie),		\
+	FN(get_socket_uid),		\
+	FN(set_hash),			\
+	FN(setsockopt),			\
+	FN(skb_adjust_room),		\
+	FN(redirect_map),		\
+	FN(sk_redirect_map),		\
+	FN(sock_map_update),		\
+	FN(xdp_adjust_meta),		\
+	FN(perf_event_read_value),	\
+	FN(perf_prog_read_value),	\
+	FN(getsockopt),			\
+	FN(override_return),		\
+	FN(sock_ops_cb_flags_set),	\
+	FN(msg_redirect_map),		\
+	FN(msg_apply_bytes),		\
+	FN(msg_cork_bytes),		\
+	FN(msg_pull_data),		\
+	FN(bind),			\
+	FN(xdp_adjust_tail),		\
+	FN(skb_get_xfrm_state),		\
+	FN(get_stack),			\
+	FN(skb_load_bytes_relative),	\
+	FN(fib_lookup),			\
+	FN(sock_hash_update),		\
+	FN(msg_redirect_hash),		\
+	FN(sk_redirect_hash),		\
+	FN(lwt_push_encap),		\
+	FN(lwt_seg6_store_bytes),	\
+	FN(lwt_seg6_adjust_srh),	\
+	FN(lwt_seg6_action),		\
+	FN(rc_repeat),			\
+	FN(rc_keydown),			\
+	FN(skb_cgroup_id),		\
+	FN(get_current_cgroup_id),	\
+	FN(get_local_storage),		\
+	FN(sk_select_reuseport),	\
+	FN(skb_ancestor_cgroup_id),	\
+	FN(sk_lookup_tcp),		\
+	FN(sk_lookup_udp),		\
+	FN(sk_release),			\
+	FN(map_push_elem),		\
+	FN(map_pop_elem),		\
+	FN(map_peek_elem),		\
+	FN(msg_push_data),		\
+	FN(msg_pop_data),		\
+	FN(rc_pointer_rel),		\
+	FN(spin_lock),			\
+	FN(spin_unlock),		\
+	FN(sk_fullsock),		\
+	FN(tcp_sock),			\
+	FN(skb_ecn_set_ce),		\
+	FN(get_listener_sock),		\
+	FN(skc_lookup_tcp),		\
+	FN(tcp_check_syncookie),	\
+	FN(sysctl_get_name),		\
+	FN(sysctl_get_current_value),	\
+	FN(sysctl_get_new_value),	\
+	FN(sysctl_set_new_value),	\
+	FN(strtol),			\
+	FN(strtoul),			\
+	FN(sk_storage_get),		\
+	FN(sk_storage_delete),		\
+	FN(send_signal),
+
+/* integer value in 'imm' field of BPF_CALL instruction selects which helper
+ * function eBPF program intends to call
+ */
+#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
+enum bpf_func_id {
+	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
+	__BPF_FUNC_MAX_ID,
+};
+#undef __BPF_ENUM_FN
+
 /* End copy from linux/bpf.h */
 
+/* Start copy from tools/include/filter.h */
+
+#define BPF_ALU64_IMM(OP, DST, IMM)				\
+	((struct bpf_insn) {					\
+		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
+		.dst_reg = DST,					\
+		.src_reg = 0,					\
+		.off   = 0,					\
+		.imm   = IMM })
+
+#define BPF_MOV64_REG(DST, SRC)					\
+	((struct bpf_insn) {					\
+		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
+#define BPF_LD_IMM64(DST, IMM)					\
+	BPF_LD_IMM64_RAW(DST, 0, IMM)
+
+#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
+	((struct bpf_insn) {					\
+		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = 0,					\
+		.imm   = (uint32_t) (IMM) }),			\
+	((struct bpf_insn) {					\
+		.code  = 0, /* zero is reserved opcode */	\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = 0,					\
+		.imm   = ((uint64_t) (IMM)) >> 32 })
+
+/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
+#define BPF_LD_MAP_FD(DST, MAP_FD)				\
+	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+
+#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
+	((struct bpf_insn) {					\
+		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
+		.dst_reg = DST,					\
+		.src_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = IMM })
+
+#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
+		.dst_reg = DST,					\
+		.src_reg = 0,					\
+		.off   = OFF,					\
+		.imm   = IMM })
+
+#define BPF_MOV64_IMM(DST, IMM)					\
+	((struct bpf_insn) {					\
+		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
+		.dst_reg = DST,					\
+		.src_reg = 0,					\
+		.off   = 0,					\
+		.imm   = IMM })
+
+#define BPF_EMIT_CALL(FUNC)					\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_CALL,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = 0,					\
+		.imm   = ((FUNC) - BPF_FUNC_unspec) })
+
+#define BPF_EXIT_INSN()						\
+	((struct bpf_insn) {					\
+		.code  = BPF_JMP | BPF_EXIT,			\
+		.dst_reg = 0,					\
+		.src_reg = 0,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
+/* End copy from tools/include/filter.h */
+
 /* Start copy from tools/lib/bpf  */
 inline uint64_t ptr_to_u64(const void *ptr)
 {
diff --git a/include/lapi/socket.h b/include/lapi/socket.h
index 2605443e8..299a4f27a 100644
--- a/include/lapi/socket.h
+++ b/include/lapi/socket.h
@@ -37,6 +37,10 @@
 # define SO_BUSY_POLL	46
 #endif
 
+#ifndef SO_ATTACH_BPF
+# define SO_ATTACH_BPF  50
+#endif
+
 #ifndef SO_ZEROCOPY
 # define SO_ZEROCOPY	60
 #endif
-- 
2.22.0



* [LTP] [PATCH v2 4/4] BPF: Sanity check creating a program
  2019-07-30 13:44 ` [LTP] [PATCH v2 1/4] BPF: Essential headers for map creation Richard Palethorpe
  2019-07-30 13:44   ` [LTP] [PATCH v2 2/4] BPF: Sanity check creating and updating maps Richard Palethorpe
  2019-07-30 13:44   ` [LTP] [PATCH v2 3/4] BPF: Essential headers for a basic program Richard Palethorpe
@ 2019-07-30 13:44   ` Richard Palethorpe
  2 siblings, 0 replies; 14+ messages in thread
From: Richard Palethorpe @ 2019-07-30 13:44 UTC (permalink / raw)
  To: ltp

Signed-off-by: Richard Palethorpe <rpalethorpe@suse.com>
---
 runtest/syscalls                           |   1 +
 testcases/kernel/syscalls/bpf/.gitignore   |   1 +
 testcases/kernel/syscalls/bpf/bpf_prog01.c | 138 +++++++++++++++++++++
 3 files changed, 140 insertions(+)
 create mode 100644 testcases/kernel/syscalls/bpf/bpf_prog01.c

diff --git a/runtest/syscalls b/runtest/syscalls
index 46880ee1d..678a601d3 100644
--- a/runtest/syscalls
+++ b/runtest/syscalls
@@ -33,6 +33,7 @@ bind02 bind02
 bind03 bind03
 
 bpf_map01 bpf_map01
+bpf_prog01 bpf_prog01
 
 brk01 brk01
 
diff --git a/testcases/kernel/syscalls/bpf/.gitignore b/testcases/kernel/syscalls/bpf/.gitignore
index f33532484..7eb5f7c92 100644
--- a/testcases/kernel/syscalls/bpf/.gitignore
+++ b/testcases/kernel/syscalls/bpf/.gitignore
@@ -1 +1,2 @@
 bpf_map01
+bpf_prog01
diff --git a/testcases/kernel/syscalls/bpf/bpf_prog01.c b/testcases/kernel/syscalls/bpf/bpf_prog01.c
new file mode 100644
index 000000000..faccd2219
--- /dev/null
+++ b/testcases/kernel/syscalls/bpf/bpf_prog01.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Richard Palethorpe <rpalethorpe@suse.com>
+ *
+ * Trivial Extended Berkeley Packet Filter (eBPF) test.
+ *
+ * Sanity check loading and running bytecode.
+ *
+ * Test flow:
+ * 1. Create array map
+ * 2. Load eBPF program
+ * 3. Attach program to socket
+ * 4. Send packet on socket
+ * 5. This should trigger eBPF program which writes to array map
+ * 6. Verify array map was written to
+ */
+
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "config.h"
+#include "tst_test.h"
+#include "lapi/socket.h"
+#include "lapi/bpf.h"
+
+static int map_fd, prog_fd;
+static int sk[2];
+
+/*
+ * r0 - r10 = registers 0 to 10
+ * r0 = return code
+ * fp/r10 = stack frame pointer
+ */
+int load_prog(int fd)
+{
+	char log_buf[BUFSIZ] = { 0 };
+	struct bpf_insn prog[] = {
+		/* Load the map FD into r1*/
+		BPF_LD_MAP_FD(BPF_REG_1, fd),
+		/* Put (key = 0) on stack and key ptr into r2 */
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),   /* r2 = fp */
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),  /* r2 = r2 - 8 */
+		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),    /* *r2 = 0 */
+		/* r0 = bpf_map_lookup_elem(r1, r2) */
+		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+		/* if r0 == 0 goto exit */
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+		/* Set map[0] = 1 */
+		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),     /* r1 = r0 */
+		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 1),     /* *r1 = 1 */
+		BPF_MOV64_IMM(BPF_REG_0, 0),             /* r0 = 0 */
+		BPF_EXIT_INSN(),		         /* return r0 */
+	};
+	union bpf_attr attr = { 0 };
+
+	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+	attr.insns = ptr_to_u64(prog);
+	attr.insn_cnt = ARRAY_SIZE(prog);
+	attr.license = ptr_to_u64("GPL");
+	attr.log_buf = ptr_to_u64(log_buf);
+	attr.log_size = sizeof(log_buf);
+	attr.log_level = 1;
+
+	TEST(bpf(BPF_PROG_LOAD, &attr, sizeof(attr)));
+	if (TST_RET == -1) {
+		if (log_buf[0] != 0) {
+			tst_brk(TFAIL | TTERRNO,
+				"Failed verification: %s",
+				log_buf);
+		} else {
+			tst_brk(TFAIL | TTERRNO, "Failed to load program");
+		}
+	} else {
+		tst_res(TPASS, "Loaded program");
+	}
+
+	return TST_RET;
+}
+
+void run(void)
+{
+	uint32_t key = 0;
+	uint64_t val;
+	char buf[5];
+	union bpf_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_type = BPF_MAP_TYPE_ARRAY;
+	attr.key_size = 4;
+	attr.value_size = 8;
+	attr.max_entries = 1;
+
+	TEST(bpf(BPF_MAP_CREATE, &attr, sizeof(attr)));
+	if (TST_RET == -1) {
+		if (TST_ERR == EPERM) {
+			tst_brk(TCONF | TTERRNO,
+				"bpf() requires CAP_SYS_ADMIN on this system");
+		} else {
+			tst_brk(TBROK | TTERRNO, "Failed to create array map");
+		}
+	}
+	map_fd = TST_RET;
+
+	prog_fd = load_prog(map_fd);
+
+	SAFE_SOCKETPAIR(AF_UNIX, SOCK_DGRAM, 0, sk);
+	SAFE_SETSOCKOPT(sk[1], SOL_SOCKET, SO_ATTACH_BPF,
+			&prog_fd, sizeof(prog_fd));
+
+	SAFE_WRITE(1, sk[0], "Ahoj!", sizeof(buf));
+
+	memset(&attr, 0, sizeof(attr));
+	attr.map_fd = map_fd;
+	attr.key = ptr_to_u64(&key);
+	attr.value = ptr_to_u64(&val);
+
+	TEST(bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)));
+	if (TST_RET == -1) {
+		tst_res(TFAIL | TTERRNO, "array map lookup");
+	} else if (val != 1) {
+		tst_res(TFAIL,
+			"val = %lu, but should be val = 1",
+			val);
+        } else {
+	        tst_res(TPASS, "val = 1");
+	}
+
+	SAFE_CLOSE(prog_fd);
+	SAFE_CLOSE(map_fd);
+	SAFE_CLOSE(sk[0]);
+	SAFE_CLOSE(sk[1]);
+}
+
+static struct tst_test test = {
+	.test_all = run,
+	.min_kver = "3.18",
+};
-- 
2.22.0


