From: Roberto Sassu <roberto.sassu@huaweicloud.com>
To: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
	martin.lau@linux.dev, song@kernel.org, yhs@fb.com,
	john.fastabend@gmail.com, kpsingh@kernel.org, sdf@google.com,
	haoluo@google.com, jolsa@kernel.org, mykolal@fb.com,
	shuah@kernel.org, oss@lmb.io
Cc: bpf@vger.kernel.org, linux-kselftest@vger.kernel.org,
	linux-kernel@vger.kernel.org, fengc@google.com,
	davem@davemloft.net
Subject: [RFC][PATCH 3/3] selftests/bpf: Test enforcement of map fd permissions at verifier level
Date: Mon, 26 Sep 2022 17:44:30 +0200
Message-ID: <20220926154430.1552800-4-roberto.sassu@huaweicloud.com>
In-Reply-To: <20220926154430.1552800-1-roberto.sassu@huaweicloud.com>

From: Roberto Sassu <roberto.sassu@huawei.com>

Create two maps, one read/write and one read-only from the program side
(BPF_F_RDONLY_PROG). Also define four programs that respectively read,
read/write, read/write (with two map fds), and write to a given map.

For the read/write map, two additional fds (one read-only, one write-only)
are obtained, to test the verifier's ability to restrict the operations a
program may perform depending on the modes of the map fds it references.

To simplify testing, the map fd encoded in the BPF_LD_MAP_FD instruction
is always the same (20); dup2() ensures that this fd refers to the
intended map at the time the program is loaded.

In addition, a second fd (21), also set with dup2(), is passed to one
eBPF program to check the merging of fd modes (read-only plus write-only
resulting in read/write).
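
The two restricted fds are obtained with bpf_map_get_fd_by_id_opts(),
introduced in patch 1/3 of this series; a sketch of the test's own calls
(info_m comes from bpf_obj_get_info_by_fd() on the read/write map, error
handling omitted):

	DECLARE_LIBBPF_OPTS(bpf_get_fd_opts, fd_opts_rdonly,
		.open_flags = BPF_F_RDONLY,
	);
	DECLARE_LIBBPF_OPTS(bpf_get_fd_opts, fd_opts_wronly,
		.open_flags = BPF_F_WRONLY,
	);

	int fd_ro = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_rdonly);
	int fd_wo = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_wronly);

	dup2(fd_ro, 20);	/* first map reference in the program  */
	dup2(fd_wo, 21);	/* second map reference in the program */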

The tests first verify the correct behavior, i.e. that a program loads
successfully if it has sufficient permissions on the map. Then, they
verify the incorrect combinations (e.g. a program attempting read/write
operations on a map referenced with a write-only fd), and ensure that the
verifier emits the expected error message.

Signed-off-by: Roberto Sassu <roberto.sassu@huawei.com>
---
 .../selftests/bpf/prog_tests/map_fd_perm.c    | 256 ++++++++++++++++++
 1 file changed, 256 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/map_fd_perm.c

diff --git a/tools/testing/selftests/bpf/prog_tests/map_fd_perm.c b/tools/testing/selftests/bpf/prog_tests/map_fd_perm.c
new file mode 100644
index 000000000000..eaabf6f5bb9b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_fd_perm.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2022 Huawei Technologies Duesseldorf GmbH
+ *
+ * Author: Roberto Sassu <roberto.sassu@huawei.com>
+ */
+
+#include <test_progs.h>
+
+#define TARGET_MAP_FD 20
+#define TARGET_MAP_FD2 21
+#define EXPECTED_MAP_VALUE 2
+
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
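+/* Look up key 0 and read the map value (requires read access). */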
+struct bpf_insn prog_r[] = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+	BPF_LD_MAP_FD(BPF_REG_1, TARGET_MAP_FD),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
+	BPF_EXIT_INSN(),
+};
+
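+/* Look up key 0 and atomically add to the value (requires read/write). */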
+struct bpf_insn prog_rw[] = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+	BPF_LD_MAP_FD(BPF_REG_1, TARGET_MAP_FD),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+	BPF_MOV64_IMM(BPF_REG_1, EXPECTED_MAP_VALUE),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
+	BPF_EXIT_INSN(),
+};
+
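+/* Same as prog_rw, but reference the map through two different fds. */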
+struct bpf_insn prog_rw_merge[] = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
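+	/* Load the same map twice, via two fds with different modes;
+	 * the verifier is expected to merge them (ro + wo = rw).
+	 */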
+	BPF_LD_MAP_FD(BPF_REG_1, TARGET_MAP_FD),
+	BPF_LD_MAP_FD(BPF_REG_1, TARGET_MAP_FD2),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+	BPF_MOV64_IMM(BPF_REG_1, EXPECTED_MAP_VALUE),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
+	BPF_EXIT_INSN(),
+};
+
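+/* Update key 0 with EXPECTED_MAP_VALUE (requires write access). */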
+struct bpf_insn prog_w[] = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+	BPF_MOV64_IMM(BPF_REG_0, EXPECTED_MAP_VALUE),
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), /* *(u32 *)(fp - 8) = r0 */
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8), /* r3 = fp - 8 */
+	BPF_LD_MAP_FD(BPF_REG_1, TARGET_MAP_FD),
+	BPF_MOV64_IMM(BPF_REG_4, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+	BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
+	BPF_EXIT_INSN(),
+};
+
+static int load_prog(struct bpf_insn *prog, int num_insn, int map_fd,
+		     int map_fd2, int map_check_fd, int expected_map_value,
+		     const char *expected_err_msg)
+{
+	u32 key = 0, value;
+	int ret, prog_fd, link_fd;
+
+	LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
+		.expected_attach_type = BPF_TRACE_FENTRY,
+		.log_buf = bpf_log_buf,
+		.log_size = BPF_LOG_BUF_SIZE,
+	);
+
+	memset(bpf_log_buf, 0, sizeof(bpf_log_buf));
+
+	ret = libbpf_find_vmlinux_btf_id("array_map_lookup_elem",
+					 trace_opts.expected_attach_type);
+	if (ret < 0)
+		return ret;
+
+	trace_opts.attach_btf_id = ret;
+
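+	/* Pin the caller's map fd at the fixed number referenced by the
+	 * BPF_LD_MAP_FD instructions in the program.
+	 */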
+	ret = dup2(map_fd, TARGET_MAP_FD);
+	if (ret < 0)
+		return ret;
+
+	if (map_fd2 != -1) {
+		ret = dup2(map_fd2, TARGET_MAP_FD2);
+		if (ret < 0) {
+			close(TARGET_MAP_FD);
+			return ret;
+		}
+	}
+
+	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
+				prog, num_insn, &trace_opts);
+
+	close(TARGET_MAP_FD);
+	if (map_fd2 != -1)
+		close(TARGET_MAP_FD2);
+
+	if (prog_fd < 0) {
+		if (expected_err_msg && strstr(bpf_log_buf, expected_err_msg))
+			return 0;
+
+		printf("%s\n", bpf_log_buf);
+		return -EINVAL;
+	}
+
+	/* The program loaded although a verifier error was expected. */
+	if (expected_err_msg) {
+		close(prog_fd);
+		return -EINVAL;
+	}
+
+	if (map_check_fd >= 0) {
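+		/* Attach the fentry program to array_map_lookup_elem(), so
+		 * that the map lookup below triggers its execution.
+		 */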
+		link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, NULL);
+		if (link_fd < 0) {
+			ret = -errno;
+			close(prog_fd);
+			return ret;
+		}
+
+		ret = bpf_map_lookup_elem(map_check_fd, &key, &value);
+
+		close(prog_fd);
+		close(link_fd);
+
+		if (ret < 0)
+			return ret;
+
+		if (value != expected_map_value)
+			return -EINVAL;
+	} else {
+		close(prog_fd);
+	}
+
+	return 0;
+}
+
+void test_map_fd_perm(void)
+{
+	int map_fd, map_fd_rdonly, map_fd_wronly;
+	int map_rdonly_fd;
+	struct bpf_map_info info_m = { 0 };
+	__u32 len = sizeof(info_m);
+	int ret;
+
+	DECLARE_LIBBPF_OPTS(bpf_get_fd_opts, fd_opts_rdonly,
+		.open_flags = BPF_F_RDONLY,
+	);
+
+	DECLARE_LIBBPF_OPTS(bpf_get_fd_opts, fd_opts_wronly,
+		.open_flags = BPF_F_WRONLY,
+	);
+
+	DECLARE_LIBBPF_OPTS(bpf_map_create_opts, create_opts,
+		.map_flags = BPF_F_RDONLY_PROG,
+	);
+
+	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(u32),
+				sizeof(u32), 1, NULL);
+	ASSERT_GE(map_fd, 0, "failed to create rw map");
+
+	map_rdonly_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(u32),
+				       sizeof(u32), 1, &create_opts);
+	ASSERT_GE(map_rdonly_fd, 0, "failed to create ro map");
+
+	ret = bpf_obj_get_info_by_fd(map_fd, &info_m, &len);
+	ASSERT_OK(ret, "bpf_obj_get_info_by_fd");
+
+	map_fd_rdonly = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_rdonly);
+	ASSERT_GE(map_fd_rdonly, 0, "bpf_map_get_fd_by_id_opts rw map ro fd");
+
+	map_fd_wronly = bpf_map_get_fd_by_id_opts(info_m.id, &fd_opts_wronly);
+	ASSERT_GE(map_fd_wronly, 0, "bpf_map_get_fd_by_id_opts rw map wo fd");
+
+	ret = load_prog(prog_r, ARRAY_SIZE(prog_r), map_fd_rdonly, -1, -1, -1,
+			NULL);
+	ASSERT_OK(ret, "load ro prog, rw map, ro fd");
+
+	ret = load_prog(prog_rw, ARRAY_SIZE(prog_rw), map_fd, -1, map_fd,
+			EXPECTED_MAP_VALUE, NULL);
+	ASSERT_OK(ret, "load rw prog, rw map, rw fd");
+
+	ret = load_prog(prog_w, ARRAY_SIZE(prog_w), map_fd_wronly, -1, map_fd,
+			EXPECTED_MAP_VALUE, NULL);
+	ASSERT_OK(ret, "load wo prog, rw map, wo fd");
+
+	ret = load_prog(prog_r, ARRAY_SIZE(prog_r), map_rdonly_fd, -1, -1, -1,
+			NULL);
+	ASSERT_OK(ret, "load ro prog, ro map, ro fd");
+
+	/* prog_w already set the value to EXPECTED_MAP_VALUE; the atomic add
+	 * performed here brings it to EXPECTED_MAP_VALUE * 2.
+	 */
+	ret = load_prog(prog_rw_merge, ARRAY_SIZE(prog_rw_merge), map_fd_rdonly,
+			map_fd_wronly, map_fd, EXPECTED_MAP_VALUE * 2, NULL);
+	ASSERT_OK(ret, "load rw prog merge, ro fd, wo fd");
+
+	ret = load_prog(prog_r, ARRAY_SIZE(prog_r), map_fd_wronly, -1, -1, -1,
+			"read from map forbidden");
+	ASSERT_OK(ret, "load ro prog, rw map, wo fd");
+
+	ret = load_prog(prog_w, ARRAY_SIZE(prog_w), map_fd_rdonly, -1, -1, -1,
+			"write into map forbidden");
+	ASSERT_OK(ret, "load wo prog, rw map, ro fd");
+
+	ret = load_prog(prog_rw, ARRAY_SIZE(prog_rw), map_fd_rdonly, -1, -1, -1,
+			"write into map forbidden");
+	ASSERT_OK(ret, "load rw prog, rw map, ro fd");
+
+	ret = load_prog(prog_rw, ARRAY_SIZE(prog_rw), map_fd_wronly, -1, -1, -1,
+			"read from map forbidden");
+	ASSERT_OK(ret, "load rw prog, rw map, wo fd");
+
+	ret = load_prog(prog_w, ARRAY_SIZE(prog_w), map_rdonly_fd, -1, -1, -1,
+			"write into map forbidden");
+	ASSERT_OK(ret, "load wo prog, ro map, rw fd");
+
+	close(map_fd_wronly);
+	close(map_fd_rdonly);
+	close(map_rdonly_fd);
+	close(map_fd);
+}
-- 
2.25.1

