From: Andrii Nakryiko <andrii@kernel.org>
To: <bpf@vger.kernel.org>, <netdev@vger.kernel.org>,
	<paul@paul-moore.com>, <brauner@kernel.org>
Cc: <linux-fsdevel@vger.kernel.org>,
	<linux-security-module@vger.kernel.org>, <keescook@chromium.org>,
	<kernel-team@meta.com>, <sargun@sargun.me>
Subject: [PATCH v10 bpf-next 08/17] bpf: consistently use BPF token throughout BPF verifier logic
Date: Thu, 9 Nov 2023 19:48:29 -0800
Message-ID: <20231110034838.1295764-9-andrii@kernel.org>
In-Reply-To: <20231110034838.1295764-1-andrii@kernel.org>

Remove remaining direct queries to perfmon_capable() and bpf_capable()
in BPF verifier logic and instead use BPF token (if available) to make
decisions about privileges.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
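For context: the conversions below rely on the bpf_token_capable()
semantics established earlier in this series -- a NULL token is expected
to degrade to the old global capability check, while a non-NULL token
delegates the decision to the token's owning user namespace. A minimal
sketch of that assumed contract, for reference only (the token->userns
field and the exact fallback logic follow the earlier patches and may
differ in detail from the final implementation):

#include <linux/bpf.h>          /* struct bpf_token (earlier patches) */
#include <linux/capability.h>   /* capable(), ns_capable() */

bool bpf_token_capable(const struct bpf_token *token, int cap)
{
	/* With a delegated token, check the capability against the user
	 * namespace the token was created in.
	 */
	if (token && ns_capable(token->userns, cap))
		return true;

	/* Without a token, fall back to the pre-token behavior: a global
	 * check, with CAP_SYS_ADMIN implying the more specific capability,
	 * just like bpf_capable() and perfmon_capable() did.
	 */
	return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
}

The NULL passed to bpf_bypass_spec_v1() in array_map_alloc() therefore
preserves the existing perfmon_capable()-based behavior at that call site.
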
 include/linux/bpf.h    | 16 ++++++++--------
 include/linux/filter.h |  2 +-
 kernel/bpf/arraymap.c  |  2 +-
 kernel/bpf/core.c      |  2 +-
 kernel/bpf/verifier.c  | 13 ++++++-------
 net/core/filter.c      |  4 ++--
 6 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 226f74712dc0..d44f252c3a1b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2183,24 +2183,24 @@ extern int sysctl_unprivileged_bpf_disabled;
 
 bool bpf_token_capable(const struct bpf_token *token, int cap);
 
-static inline bool bpf_allow_ptr_leaks(void)
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
 {
-	return perfmon_capable();
+	return bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_allow_uninit_stack(void)
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
 {
-	return perfmon_capable();
+	return bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_bypass_spec_v1(void)
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
 {
-	return cpu_mitigations_off() || perfmon_capable();
+	return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_bypass_spec_v4(void)
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
 {
-	return cpu_mitigations_off() || perfmon_capable();
+	return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a4953fafc8cb..14354605ad26 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1139,7 +1139,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
 		return false;
 	if (!bpf_jit_harden)
 		return false;
-	if (bpf_jit_harden == 1 && bpf_capable())
+	if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
 		return false;
 
 	return true;
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 2058e89b5ddd..f0c64df6b6ff 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -82,7 +82,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	int numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
-	bool bypass_spec_v1 = bpf_bypass_spec_v1();
+	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
 	u64 array_size, mask64;
 	struct bpf_array *array;
 
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index fc8de25b7948..ce307440fa8d 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -675,7 +675,7 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 {
 	if (!bpf_prog_kallsyms_candidate(fp) ||
-	    !bpf_capable())
+	    !bpf_token_capable(fp->aux->token, CAP_BPF))
 		return;
 
 	bpf_prog_ksym_set_addr(fp);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b058d3a3bb6a..a28a1f6d5fc1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20708,7 +20708,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	env->prog = *prog;
 	env->ops = bpf_verifier_ops[env->prog->type];
 	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
-	is_priv = bpf_capable();
+
+	env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
+	env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
+	env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
+	env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);
+	env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF);
 
 	bpf_get_btf_vmlinux();
 
@@ -20740,12 +20745,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
 		env->strict_alignment = false;
 
-	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
-	env->allow_uninit_stack = bpf_allow_uninit_stack();
-	env->bypass_spec_v1 = bpf_bypass_spec_v1();
-	env->bypass_spec_v4 = bpf_bypass_spec_v4();
-	env->bpf_capable = bpf_capable();
-
 	if (is_priv)
 		env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index cd2ef9fb4f1e..a3de4a7d3733 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8557,7 +8557,7 @@ static bool cg_skb_is_valid_access(int off, int size,
 		return false;
 	case bpf_ctx_range(struct __sk_buff, data):
 	case bpf_ctx_range(struct __sk_buff, data_end):
-		if (!bpf_capable())
+		if (!bpf_token_capable(prog->aux->token, CAP_BPF))
 			return false;
 		break;
 	}
@@ -8569,7 +8569,7 @@ static bool cg_skb_is_valid_access(int off, int size,
 		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
 			break;
 		case bpf_ctx_range(struct __sk_buff, tstamp):
-			if (!bpf_capable())
+			if (!bpf_token_capable(prog->aux->token, CAP_BPF))
 				return false;
 			break;
 		default:
-- 
2.34.1


Thread overview: 22+ messages
2023-11-10  3:48 [PATCH v10 bpf-next 00/17] BPF token and BPF FS-based delegation Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 01/17] bpf: align CAP_NET_ADMIN checks with bpf_capable() approach Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 02/17] bpf: add BPF token delegation mount options to BPF FS Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 03/17] bpf: introduce BPF token object Andrii Nakryiko
2023-11-27 14:25   ` Christian Brauner
2023-11-27 18:16     ` Andrii Nakryiko
2023-11-27 16:05   ` Christian Brauner
2023-11-27 18:18     ` Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 04/17] bpf: add BPF token support to BPF_MAP_CREATE command Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 05/17] bpf: add BPF token support to BPF_BTF_LOAD command Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 06/17] bpf: add BPF token support to BPF_PROG_LOAD command Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 07/17] bpf: take into account BPF token when fetching helper protos Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 08/17] bpf: consistently use BPF token throughout BPF verifier logic Andrii Nakryiko [this message]
2023-11-10  3:48 ` [PATCH v10 bpf-next 09/17] bpf,lsm: refactor bpf_prog_alloc/bpf_prog_free LSM hooks Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 10/17] bpf,lsm: refactor bpf_map_alloc/bpf_map_free " Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 11/17] bpf,lsm: add BPF token " Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 12/17] libbpf: add bpf_token_create() API Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 13/17] libbpf: add BPF token support to bpf_map_create() API Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 14/17] libbpf: add BPF token support to bpf_btf_load() API Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 15/17] libbpf: add BPF token support to bpf_prog_load() API Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 16/17] selftests/bpf: add BPF token-enabled tests Andrii Nakryiko
2023-11-10  3:48 ` [PATCH v10 bpf-next 17/17] bpf,selinux: allocate bpf_security_struct per BPF token Andrii Nakryiko
