* [RFC PATCH bpf-next 0/4] bpf_panic() helper
@ 2022-07-11 8:32 Artem Savkov
2022-07-11 8:32 ` [RFC PATCH bpf-next 1/4] bpf: add a sysctl to enable destructive bpf helpers Artem Savkov
` (4 more replies)
0 siblings, 5 replies; 18+ messages in thread
From: Artem Savkov @ 2022-07-11 8:32 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf, netdev
Cc: linux-kernel, Andrea Arcangeli, Artem Savkov
eBPF is often used for kernel debugging, and one of the widely used and
powerful debugging techniques is post-mortem debugging with a full memory dump.
Triggering a panic at exactly the right moment allows the user to get such a
dump and thus a better view of the system's state. This patchset adds
bpf_panic() helper to do exactly that.
I realize that even though there are multiple guards present, a helper like
this is contrary to BPF being "safe", so this is sent as RFC to have a
discussion on whether adding destructive capabilities is deemed acceptable.
Artem Savkov (4):
bpf: add a sysctl to enable destructive bpf helpers
bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD
bpf: add bpf_panic() helper
selftests/bpf: bpf_panic selftest
include/linux/bpf.h | 8 +
include/uapi/linux/bpf.h | 13 ++
kernel/bpf/core.c | 1 +
kernel/bpf/helpers.c | 13 ++
kernel/bpf/syscall.c | 33 +++-
kernel/bpf/verifier.c | 7 +
kernel/trace/bpf_trace.c | 2 +
tools/include/uapi/linux/bpf.h | 13 ++
.../selftests/bpf/prog_tests/bpf_panic.c | 144 ++++++++++++++++++
9 files changed, 233 insertions(+), 1 deletion(-)
create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_panic.c
--
2.35.3
^ permalink raw reply [flat|nested] 18+ messages in thread
* [RFC PATCH bpf-next 1/4] bpf: add a sysctl to enable destructive bpf helpers
2022-07-11 8:32 [RFC PATCH bpf-next 0/4] bpf_panic() helper Artem Savkov
@ 2022-07-11 8:32 ` Artem Savkov
2022-07-11 8:32 ` [RFC PATCH bpf-next 2/4] bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD Artem Savkov
` (3 subsequent siblings)
4 siblings, 0 replies; 18+ messages in thread
From: Artem Savkov @ 2022-07-11 8:32 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf, netdev
Cc: linux-kernel, Andrea Arcangeli, Artem Savkov
Add a kernel.destructive_bpf_enabled sysctl knob to allow enabling bpf
helpers that can be destructive to the system. One such helper,
bpf_panic(), is added later in the series.
Signed-off-by: Artem Savkov <asavkov@redhat.com>
---
include/linux/bpf.h | 6 ++++++
kernel/bpf/syscall.c | 29 +++++++++++++++++++++++++++++
2 files changed, 35 insertions(+)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0edd7d2c0064..77972724bed7 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1641,6 +1641,7 @@ bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
#endif
extern int sysctl_unprivileged_bpf_disabled;
+extern int sysctl_destructive_bpf_enabled;
static inline bool bpf_allow_ptr_leaks(void)
{
@@ -1926,6 +1927,11 @@ static inline bool unprivileged_ebpf_enabled(void)
return !sysctl_unprivileged_bpf_disabled;
}
+static inline bool destructive_ebpf_enabled(void)
+{
+ return sysctl_destructive_bpf_enabled;
+}
+
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7d5af5b99f0d..1ce6541d90e1 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -57,6 +57,8 @@ static DEFINE_SPINLOCK(link_idr_lock);
int sysctl_unprivileged_bpf_disabled __read_mostly =
IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
+int sysctl_destructive_bpf_enabled __read_mostly = 0;
+
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
@@ -5226,6 +5228,24 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
return ret;
}
+static int bpf_destructive_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, destructive_enable = *(int *)table->data;
+ struct ctl_table tmp = *table;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ tmp.data = &destructive_enable;
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+ if (write && !ret) {
+ *(int *)table->data = destructive_enable;
+ }
+
+ return ret;
+}
+
static struct ctl_table bpf_syscall_table[] = {
{
.procname = "unprivileged_bpf_disabled",
@@ -5236,6 +5256,15 @@ static struct ctl_table bpf_syscall_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
+ {
+ .procname = "destructive_bpf_enabled",
+ .data = &sysctl_destructive_bpf_enabled,
+ .maxlen = sizeof(sysctl_destructive_bpf_enabled),
+ .mode = 0644,
+ .proc_handler = bpf_destructive_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
{
.procname = "bpf_stats_enabled",
.data = &bpf_stats_enabled_key.key,
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [RFC PATCH bpf-next 2/4] bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD
2022-07-11 8:32 [RFC PATCH bpf-next 0/4] bpf_panic() helper Artem Savkov
2022-07-11 8:32 ` [RFC PATCH bpf-next 1/4] bpf: add a sysctl to enable destructive bpf helpers Artem Savkov
@ 2022-07-11 8:32 ` Artem Savkov
2022-07-11 10:56 ` Jiri Olsa
2022-07-11 8:32 ` [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper Artem Savkov
` (2 subsequent siblings)
4 siblings, 1 reply; 18+ messages in thread
From: Artem Savkov @ 2022-07-11 8:32 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf, netdev
Cc: linux-kernel, Andrea Arcangeli, Artem Savkov
Add a BPF_F_DESTRUCTIVE flag that will be required to be supplied to
BPF_PROG_LOAD for programs to utilize destructive helpers such as
bpf_panic().
Signed-off-by: Artem Savkov <asavkov@redhat.com>
---
include/linux/bpf.h | 1 +
include/uapi/linux/bpf.h | 6 ++++++
kernel/bpf/syscall.c | 4 +++-
tools/include/uapi/linux/bpf.h | 6 ++++++
4 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 77972724bed7..43c008e3587a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1041,6 +1041,7 @@ struct bpf_prog_aux {
bool sleepable;
bool tail_call_reachable;
bool xdp_has_frags;
+ bool destructive;
bool use_bpf_prog_pack;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index e81362891596..4423874b5da4 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1121,6 +1121,12 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+/* If BPF_F_DESTRUCTIVE is used in BPF_PROG_LOAD command, the loaded program
+ * will be able to perform destructive operations such as calling bpf_panic()
+ * helper.
+ */
+#define BPF_F_DESTRUCTIVE (1U << 6)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1ce6541d90e1..779feac2dc7d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2449,7 +2449,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
BPF_F_TEST_STATE_FREQ |
BPF_F_SLEEPABLE |
BPF_F_TEST_RND_HI32 |
- BPF_F_XDP_HAS_FRAGS))
+ BPF_F_XDP_HAS_FRAGS |
+ BPF_F_DESTRUCTIVE))
return -EINVAL;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
@@ -2536,6 +2537,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
prog->aux->offload_requested = !!attr->prog_ifindex;
prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
+ prog->aux->destructive = attr->prog_flags & BPF_F_DESTRUCTIVE;
err = security_bpf_prog_alloc(prog->aux);
if (err)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index e81362891596..4423874b5da4 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1121,6 +1121,12 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+/* If BPF_F_DESTRUCTIVE is used in BPF_PROG_LOAD command, the loaded program
+ * will be able to perform destructive operations such as calling bpf_panic()
+ * helper.
+ */
+#define BPF_F_DESTRUCTIVE (1U << 6)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 2/4] bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD
2022-07-11 8:32 ` [RFC PATCH bpf-next 2/4] bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD Artem Savkov
@ 2022-07-11 10:56 ` Jiri Olsa
2022-07-11 11:48 ` Artem Savkov
0 siblings, 1 reply; 18+ messages in thread
From: Jiri Olsa @ 2022-07-11 10:56 UTC (permalink / raw)
To: Artem Savkov
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
netdev, linux-kernel, Andrea Arcangeli
On Mon, Jul 11, 2022 at 10:32:18AM +0200, Artem Savkov wrote:
> Add a BPF_F_DESTRUCTIVE will be required to be supplied to
> BPF_PROG_LOAD for programs to utilize destructive helpers such as
> bpf_panic().
I'd think that having kernel.destructive_bpf_enabled sysctl knob enabled
would be enough to enable that helper from any program, not sure having
extra load flag adds more security
jirka
>
> Signed-off-by: Artem Savkov <asavkov@redhat.com>
> ---
> include/linux/bpf.h | 1 +
> include/uapi/linux/bpf.h | 6 ++++++
> kernel/bpf/syscall.c | 4 +++-
> tools/include/uapi/linux/bpf.h | 6 ++++++
> 4 files changed, 16 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 77972724bed7..43c008e3587a 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1041,6 +1041,7 @@ struct bpf_prog_aux {
> bool sleepable;
> bool tail_call_reachable;
> bool xdp_has_frags;
> + bool destructive;
> bool use_bpf_prog_pack;
> /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
> const struct btf_type *attach_func_proto;
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index e81362891596..4423874b5da4 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -1121,6 +1121,12 @@ enum bpf_link_type {
> */
> #define BPF_F_XDP_HAS_FRAGS (1U << 5)
>
> +/* If BPF_F_DESTRUCTIVE is used in BPF_PROG_LOAD command, the loaded program
> + * will be able to perform destructive operations such as calling bpf_panic()
> + * helper.
> + */
> +#define BPF_F_DESTRUCTIVE (1U << 6)
> +
> /* link_create.kprobe_multi.flags used in LINK_CREATE command for
> * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
> */
> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index 1ce6541d90e1..779feac2dc7d 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -2449,7 +2449,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
> BPF_F_TEST_STATE_FREQ |
> BPF_F_SLEEPABLE |
> BPF_F_TEST_RND_HI32 |
> - BPF_F_XDP_HAS_FRAGS))
> + BPF_F_XDP_HAS_FRAGS |
> + BPF_F_DESTRUCTIVE))
> return -EINVAL;
>
> if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
> @@ -2536,6 +2537,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
> prog->aux->offload_requested = !!attr->prog_ifindex;
> prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
> prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
> + prog->aux->destructive = attr->prog_flags & BPF_F_DESTRUCTIVE;
>
> err = security_bpf_prog_alloc(prog->aux);
> if (err)
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index e81362891596..4423874b5da4 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -1121,6 +1121,12 @@ enum bpf_link_type {
> */
> #define BPF_F_XDP_HAS_FRAGS (1U << 5)
>
> +/* If BPF_F_DESTRUCTIVE is used in BPF_PROG_LOAD command, the loaded program
> + * will be able to perform destructive operations such as calling bpf_panic()
> + * helper.
> + */
> +#define BPF_F_DESTRUCTIVE (1U << 6)
> +
> /* link_create.kprobe_multi.flags used in LINK_CREATE command for
> * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
> */
> --
> 2.35.3
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 2/4] bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD
2022-07-11 10:56 ` Jiri Olsa
@ 2022-07-11 11:48 ` Artem Savkov
0 siblings, 0 replies; 18+ messages in thread
From: Artem Savkov @ 2022-07-11 11:48 UTC (permalink / raw)
To: Jiri Olsa
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
netdev, linux-kernel, Andrea Arcangeli
On Mon, Jul 11, 2022 at 12:56:28PM +0200, Jiri Olsa wrote:
> On Mon, Jul 11, 2022 at 10:32:18AM +0200, Artem Savkov wrote:
> > Add a BPF_F_DESTRUCTIVE will be required to be supplied to
> > BPF_PROG_LOAD for programs to utilize destructive helpers such as
> > bpf_panic().
>
> I'd think that having kernel.destructive_bpf_enabled sysctl knob enabled
> would be enough to enable that helper from any program, not sure having
> extra load flag adds more security
I agree it doesn't add more security. The idea was to have a way for a
developer to explicitly state that they understand this will be dangerous. This
flag can also translate well into something like a --destructive option
for bpftrace without needing to keep a list of destructive helpers on
their side.
--
Artem
^ permalink raw reply [flat|nested] 18+ messages in thread
* [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-11 8:32 [RFC PATCH bpf-next 0/4] bpf_panic() helper Artem Savkov
2022-07-11 8:32 ` [RFC PATCH bpf-next 1/4] bpf: add a sysctl to enable destructive bpf helpers Artem Savkov
2022-07-11 8:32 ` [RFC PATCH bpf-next 2/4] bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD Artem Savkov
@ 2022-07-11 8:32 ` Artem Savkov
2022-07-11 10:42 ` Jiri Olsa
` (2 more replies)
2022-07-11 8:32 ` [RFC PATCH bpf-next 4/4] selftests/bpf: bpf_panic selftest Artem Savkov
2022-07-11 10:51 ` [RFC PATCH bpf-next 0/4] bpf_panic() helper Jiri Olsa
4 siblings, 3 replies; 18+ messages in thread
From: Artem Savkov @ 2022-07-11 8:32 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf, netdev
Cc: linux-kernel, Andrea Arcangeli, Artem Savkov
Add a helper that will make the kernel panic immediately with specified
message. Using this helper requires kernel.destructive_bpf_enabled sysctl
to be enabled, BPF_F_DESTRUCTIVE flag to be supplied on program load as
well as the CAP_SYS_BOOT capability.
Signed-off-by: Artem Savkov <asavkov@redhat.com>
---
include/linux/bpf.h | 1 +
include/uapi/linux/bpf.h | 7 +++++++
kernel/bpf/core.c | 1 +
kernel/bpf/helpers.c | 13 +++++++++++++
kernel/bpf/verifier.c | 7 +++++++
kernel/trace/bpf_trace.c | 2 ++
tools/include/uapi/linux/bpf.h | 7 +++++++
7 files changed, 38 insertions(+)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 43c008e3587a..77c20ba9ca8e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2339,6 +2339,7 @@ extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
+extern const struct bpf_func_proto bpf_panic_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4423874b5da4..e2e2c4de44ee 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3927,6 +3927,12 @@ union bpf_attr {
* Return
* The 64 bit jiffies
*
+ * void bpf_panic(const char *msg)
+ * Description
+ * Make the kernel panic immediately
+ * Return
+ * void
+ *
* long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
* Description
* For an eBPF program attached to a perf event, retrieve the
@@ -5452,6 +5458,7 @@ union bpf_attr {
FN(tcp_send_ack), \
FN(send_signal_thread), \
FN(jiffies64), \
+ FN(panic), \
FN(read_branch_records), \
FN(get_ns_current_pid_tgid), \
FN(xdp_output), \
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index b5ffebcce6cc..0f333a0e85a5 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2649,6 +2649,7 @@ const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
const struct bpf_func_proto bpf_spin_lock_proto __weak;
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
const struct bpf_func_proto bpf_jiffies64_proto __weak;
+const struct bpf_func_proto bpf_panic_proto __weak;
const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index a1c84d256f83..5cb90208a264 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -374,6 +374,19 @@ const struct bpf_func_proto bpf_jiffies64_proto = {
.ret_type = RET_INTEGER,
};
+BPF_CALL_1(bpf_panic, const char *, msg)
+{
+ panic(msg);
+ return 0;
+}
+
+const struct bpf_func_proto bpf_panic_proto = {
+ .func = bpf_panic,
+ .gpl_only = false,
+ .ret_type = RET_VOID,
+ .arg1_type = ARG_PTR_TO_CONST_STR,
+};
+
#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2859901ffbe3..f49c026917c5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7285,6 +7285,13 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
reg_type_str(env, regs[BPF_REG_1].type));
return -EACCES;
}
+ break;
+ case BPF_FUNC_panic:
+ struct bpf_prog_aux *aux = env->prog->aux;
+ if (!aux->destructive) {
+ verbose(env, "bpf_panic() calls require BPF_F_DESTRUCTIVE flag\n");
+ return -EACCES;
+ }
}
if (err)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4be976cf7d63..3ee888507795 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1304,6 +1304,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_find_vma_proto;
case BPF_FUNC_trace_vprintk:
return bpf_get_trace_vprintk_proto();
+ case BPF_FUNC_panic:
+ return capable(CAP_SYS_BOOT) && destructive_ebpf_enabled() ? &bpf_panic_proto : NULL;
default:
return bpf_base_func_proto(func_id);
}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4423874b5da4..e2e2c4de44ee 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3927,6 +3927,12 @@ union bpf_attr {
* Return
* The 64 bit jiffies
*
+ * void bpf_panic(const char *msg)
+ * Description
+ * Make the kernel panic immediately
+ * Return
+ * void
+ *
* long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
* Description
* For an eBPF program attached to a perf event, retrieve the
@@ -5452,6 +5458,7 @@ union bpf_attr {
FN(tcp_send_ack), \
FN(send_signal_thread), \
FN(jiffies64), \
+ FN(panic), \
FN(read_branch_records), \
FN(get_ns_current_pid_tgid), \
FN(xdp_output), \
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-11 8:32 ` [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper Artem Savkov
@ 2022-07-11 10:42 ` Jiri Olsa
2022-07-12 17:53 ` Song Liu
2022-07-14 17:03 ` kernel test robot
2 siblings, 0 replies; 18+ messages in thread
From: Jiri Olsa @ 2022-07-11 10:42 UTC (permalink / raw)
To: Artem Savkov
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
netdev, linux-kernel, Andrea Arcangeli
On Mon, Jul 11, 2022 at 10:32:19AM +0200, Artem Savkov wrote:
SNIP
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index 4423874b5da4..e2e2c4de44ee 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -3927,6 +3927,12 @@ union bpf_attr {
> * Return
> * The 64 bit jiffies
> *
> + * void bpf_panic(const char *msg)
> + * Description
> + * Make the kernel panic immediately
> + * Return
> + * void
> + *
> * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
> * Description
> * For an eBPF program attached to a perf event, retrieve the
> @@ -5452,6 +5458,7 @@ union bpf_attr {
> FN(tcp_send_ack), \
> FN(send_signal_thread), \
> FN(jiffies64), \
> + FN(panic), \
> FN(read_branch_records), \
> FN(get_ns_current_pid_tgid), \
> FN(xdp_output), \
new helper needs to be added at the end of the list
jirka
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-11 8:32 ` [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper Artem Savkov
2022-07-11 10:42 ` Jiri Olsa
@ 2022-07-12 17:53 ` Song Liu
2022-07-12 18:08 ` Alexei Starovoitov
2022-07-14 17:03 ` kernel test robot
2 siblings, 1 reply; 18+ messages in thread
From: Song Liu @ 2022-07-12 17:53 UTC (permalink / raw)
To: Artem Savkov
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
Networking, open list, Andrea Arcangeli
On Mon, Jul 11, 2022 at 1:32 AM Artem Savkov <asavkov@redhat.com> wrote:
>
> Add a helper that will make the kernel panic immediately with specified
> message. Using this helper requires kernel.destructive_bpf_enabled sysctl
> to be enabled, BPF_F_DESTRUCTIVE flag to be supplied on program load as
> well as CAP_SYS_BOOT capabilities.
>
> Signed-off-by: Artem Savkov <asavkov@redhat.com>
> ---
> include/linux/bpf.h | 1 +
> include/uapi/linux/bpf.h | 7 +++++++
> kernel/bpf/core.c | 1 +
> kernel/bpf/helpers.c | 13 +++++++++++++
> kernel/bpf/verifier.c | 7 +++++++
> kernel/trace/bpf_trace.c | 2 ++
> tools/include/uapi/linux/bpf.h | 7 +++++++
> 7 files changed, 38 insertions(+)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 43c008e3587a..77c20ba9ca8e 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2339,6 +2339,7 @@ extern const struct bpf_func_proto bpf_strtol_proto;
> extern const struct bpf_func_proto bpf_strtoul_proto;
> extern const struct bpf_func_proto bpf_tcp_sock_proto;
> extern const struct bpf_func_proto bpf_jiffies64_proto;
> +extern const struct bpf_func_proto bpf_panic_proto;
> extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
> extern const struct bpf_func_proto bpf_event_output_data_proto;
> extern const struct bpf_func_proto bpf_ringbuf_output_proto;
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 4423874b5da4..e2e2c4de44ee 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -3927,6 +3927,12 @@ union bpf_attr {
> * Return
> * The 64 bit jiffies
> *
> + * void bpf_panic(const char *msg)
> + * Description
> + * Make the kernel panic immediately
> + * Return
> + * void
> + *
> * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
> * Description
> * For an eBPF program attached to a perf event, retrieve the
> @@ -5452,6 +5458,7 @@ union bpf_attr {
> FN(tcp_send_ack), \
> FN(send_signal_thread), \
> FN(jiffies64), \
> + FN(panic), \
> FN(read_branch_records), \
> FN(get_ns_current_pid_tgid), \
> FN(xdp_output), \
> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
> index b5ffebcce6cc..0f333a0e85a5 100644
> --- a/kernel/bpf/core.c
> +++ b/kernel/bpf/core.c
> @@ -2649,6 +2649,7 @@ const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
> const struct bpf_func_proto bpf_spin_lock_proto __weak;
> const struct bpf_func_proto bpf_spin_unlock_proto __weak;
> const struct bpf_func_proto bpf_jiffies64_proto __weak;
> +const struct bpf_func_proto bpf_panic_proto __weak;
>
> const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
> const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index a1c84d256f83..5cb90208a264 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -374,6 +374,19 @@ const struct bpf_func_proto bpf_jiffies64_proto = {
> .ret_type = RET_INTEGER,
> };
>
> +BPF_CALL_1(bpf_panic, const char *, msg)
> +{
> + panic(msg);
I think we should also check
capable(CAP_SYS_BOOT) && destructive_ebpf_enabled()
here. Or at least, destructive_ebpf_enabled(). Otherwise, we
may trigger panic after the sysctl is disabled.
In general, I don't think sysctl is a good API, as it is global, and
the user can easily forget to turn it back off. If possible, I would
rather avoid adding new BPF related sysctls.
Thanks,
Song
> + return 0;
> +}
> +
> +const struct bpf_func_proto bpf_panic_proto = {
> + .func = bpf_panic,
> + .gpl_only = false,
> + .ret_type = RET_VOID,
> + .arg1_type = ARG_PTR_TO_CONST_STR,
> +};
> +
> #ifdef CONFIG_CGROUPS
> BPF_CALL_0(bpf_get_current_cgroup_id)
> {
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 2859901ffbe3..f49c026917c5 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -7285,6 +7285,13 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
> reg_type_str(env, regs[BPF_REG_1].type));
> return -EACCES;
> }
> + break;
> + case BPF_FUNC_panic:
> + struct bpf_prog_aux *aux = env->prog->aux;
> + if (!aux->destructive) {
> + verbose(env, "bpf_panic() calls require BPF_F_DESTRUCTIVE flag\n");
> + return -EACCES;
> + }
> }
>
> if (err)
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 4be976cf7d63..3ee888507795 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1304,6 +1304,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> return &bpf_find_vma_proto;
> case BPF_FUNC_trace_vprintk:
> return bpf_get_trace_vprintk_proto();
> + case BPF_FUNC_panic:
> + return capable(CAP_SYS_BOOT) && destructive_ebpf_enabled() ? &bpf_panic_proto : NULL;
> default:
> return bpf_base_func_proto(func_id);
> }
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index 4423874b5da4..e2e2c4de44ee 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -3927,6 +3927,12 @@ union bpf_attr {
> * Return
> * The 64 bit jiffies
> *
> + * void bpf_panic(const char *msg)
> + * Description
> + * Make the kernel panic immediately
> + * Return
> + * void
> + *
> * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
> * Description
> * For an eBPF program attached to a perf event, retrieve the
> @@ -5452,6 +5458,7 @@ union bpf_attr {
> FN(tcp_send_ack), \
> FN(send_signal_thread), \
> FN(jiffies64), \
> + FN(panic), \
> FN(read_branch_records), \
> FN(get_ns_current_pid_tgid), \
> FN(xdp_output), \
> --
> 2.35.3
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-12 17:53 ` Song Liu
@ 2022-07-12 18:08 ` Alexei Starovoitov
2022-07-13 13:31 ` Artem Savkov
0 siblings, 1 reply; 18+ messages in thread
From: Alexei Starovoitov @ 2022-07-12 18:08 UTC (permalink / raw)
To: Song Liu
Cc: Artem Savkov, Alexei Starovoitov, Daniel Borkmann,
Andrii Nakryiko, bpf, Networking, open list, Andrea Arcangeli
On Tue, Jul 12, 2022 at 10:53 AM Song Liu <song@kernel.org> wrote:
>
> >
> > +BPF_CALL_1(bpf_panic, const char *, msg)
> > +{
> > + panic(msg);
>
> I think we should also check
>
> capable(CAP_SYS_BOOT) && destructive_ebpf_enabled()
>
> here. Or at least, destructive_ebpf_enabled(). Otherwise, we
> may trigger panic after the sysctl is disabled.
>
> In general, I don't think sysctl is a good API, as it is global, and
> the user can easily forget to turn it back off. If possible, I would
> rather avoid adding new BPF related sysctls.
+1. A new sysctl isn't warranted here.
Just CAP_SYS_BOOT would be enough here.
Also full blown panic() seems unnecessary.
If the motivation is to get a memory dump then crash_kexec() helper
would be more suitable.
If the goal is to reboot the system then the wrapper of sys_reboot()
is better.
Unfortunately the cover letter lacks these details.
Why this destructive action cannot be delegated to user space?
btw, we should avoid adding new uapi helpers in most cases.
Ideally all of them should be added as new kfunc-s, because they're
unstable and we can rip them out later if our judgement call
turns out to be problematic for whatever reason.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-12 18:08 ` Alexei Starovoitov
@ 2022-07-13 13:31 ` Artem Savkov
2022-07-13 22:20 ` Alexei Starovoitov
0 siblings, 1 reply; 18+ messages in thread
From: Artem Savkov @ 2022-07-13 13:31 UTC (permalink / raw)
To: Alexei Starovoitov
Cc: Song Liu, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
bpf, Networking, open list, Andrea Arcangeli, dvacek
On Tue, Jul 12, 2022 at 11:08:54AM -0700, Alexei Starovoitov wrote:
> On Tue, Jul 12, 2022 at 10:53 AM Song Liu <song@kernel.org> wrote:
> >
> > >
> > > +BPF_CALL_1(bpf_panic, const char *, msg)
> > > +{
> > > + panic(msg);
> >
> > I think we should also check
> >
> > capable(CAP_SYS_BOOT) && destructive_ebpf_enabled()
> >
> > here. Or at least, destructive_ebpf_enabled(). Otherwise, we
> > may trigger panic after the sysctl is disabled.
> >
> > In general, I don't think sysctl is a good API, as it is global, and
> > the user can easily forget to turn it back off. If possible, I would
> > rather avoid adding new BPF related sysctls.
>
> +1. New syscal isn't warranted here.
> Just CAP_SYS_BOOT would be enough here.
Point taken, I'll remove sysctl knob in any further versions.
> Also full blown panic() seems unnecessary.
> If the motivation is to get a memory dump then crash_kexec() helper
> would be more suitable.
> If the goal is to reboot the system then the wrapper of sys_reboot()
> is better.
> Unfortunately the cover letter lacks these details.
The main goal is to get the memory dump, so crash_kexec() should be enough.
However panic() is a bit more versatile and its consequences are configurable
to some extent. Are there any downsides to using it?
> Why this destructive action cannot be delegated to user space?
Going through userspace adds delays and makes it impossible to hit "exactly
the right moment" thus making it unusable in most cases.
I'll add this to the cover letter.
> btw, we should avoid adding new uapi helpers in most cases.
> Ideally all of them should be added as new kfunc-s, because they're
> unstable and we can rip them out later if our judgement call
> turns out to be problematic for whatever reason.
Ok, I'll look into doing it this way.
--
Regards,
Artem
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-13 13:31 ` Artem Savkov
@ 2022-07-13 22:20 ` Alexei Starovoitov
2022-07-15 12:52 ` Artem Savkov
0 siblings, 1 reply; 18+ messages in thread
From: Alexei Starovoitov @ 2022-07-13 22:20 UTC (permalink / raw)
To: Artem Savkov
Cc: Song Liu, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
bpf, Networking, open list, Andrea Arcangeli, dvacek
On Wed, Jul 13, 2022 at 6:31 AM Artem Savkov <asavkov@redhat.com> wrote:
>
> On Tue, Jul 12, 2022 at 11:08:54AM -0700, Alexei Starovoitov wrote:
> > On Tue, Jul 12, 2022 at 10:53 AM Song Liu <song@kernel.org> wrote:
> > >
> > > >
> > > > +BPF_CALL_1(bpf_panic, const char *, msg)
> > > > +{
> > > > + panic(msg);
> > >
> > > I think we should also check
> > >
> > > capable(CAP_SYS_BOOT) && destructive_ebpf_enabled()
> > >
> > > here. Or at least, destructive_ebpf_enabled(). Otherwise, we
> > > may trigger panic after the sysctl is disabled.
> > >
> > > In general, I don't think sysctl is a good API, as it is global, and
> > > the user can easily forget to turn it back off. If possible, I would
> > > rather avoid adding new BPF related sysctls.
> >
> > +1. New sysctl isn't warranted here.
> > Just CAP_SYS_BOOT would be enough here.
>
> Point taken, I'll remove sysctl knob in any further versions.
>
> > Also full blown panic() seems unnecessary.
> > If the motivation is to get a memory dump then crash_kexec() helper
> > would be more suitable.
> > If the goal is to reboot the system then the wrapper of sys_reboot()
> > is better.
> > Unfortunately the cover letter lacks these details.
>
> The main goal is to get the memory dump, so crash_kexec() should be enough.
> However panic() is a bit more versatile and its consequences are configurable
> to some extent. Are there any downsides to using it?
versatile? In what sense? That it does a lot more than kexec?
That's a disadvantage.
We should provide bpf with minimal building blocks and let
bpf program decide what to do.
If dmesg (that is part of panic) is useful it should be its
own kfunc.
If halt is necessary -> separate kfunc as well.
reboot -> another kfunc.
Also panic() is not guaranteed to do kexec and just
panic is not what you stated is the goal of the helper.
>
> > Why this destructive action cannot be delegated to user space?
>
> Going through userspace adds delays and makes it impossible to hit "exactly
> the right moment" thus making it unusable in most cases.
What would be an example of that?
kexec is not instant either.
> I'll add this to the cover letter.
>
> > btw, we should avoid adding new uapi helpers in most cases.
> > Ideally all of them should be added as new kfunc-s, because they're
> > unstable and we can rip them out later if our judgement call
> > turns out to be problematic for whatever reason.
>
> Ok, I'll look into doing it this way.
>
> --
> Regards,
> Artem
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-13 22:20 ` Alexei Starovoitov
@ 2022-07-15 12:52 ` Artem Savkov
2022-07-18 21:01 ` Alexei Starovoitov
0 siblings, 1 reply; 18+ messages in thread
From: Artem Savkov @ 2022-07-15 12:52 UTC (permalink / raw)
To: Alexei Starovoitov
Cc: Song Liu, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
bpf, Networking, open list, Andrea Arcangeli, dvacek
On Wed, Jul 13, 2022 at 03:20:22PM -0700, Alexei Starovoitov wrote:
> On Wed, Jul 13, 2022 at 6:31 AM Artem Savkov <asavkov@redhat.com> wrote:
> >
> > On Tue, Jul 12, 2022 at 11:08:54AM -0700, Alexei Starovoitov wrote:
> > > On Tue, Jul 12, 2022 at 10:53 AM Song Liu <song@kernel.org> wrote:
> > > >
> > > > >
> > > > > +BPF_CALL_1(bpf_panic, const char *, msg)
> > > > > +{
> > > > > + panic(msg);
> > > >
> > > > I think we should also check
> > > >
> > > > capable(CAP_SYS_BOOT) && destructive_ebpf_enabled()
> > > >
> > > > here. Or at least, destructive_ebpf_enabled(). Otherwise, we
> > > > may trigger panic after the sysctl is disabled.
> > > >
> > > > In general, I don't think sysctl is a good API, as it is global, and
> > > > the user can easily forget to turn it back off. If possible, I would
> > > > rather avoid adding new BPF related sysctls.
> > >
> > > +1. New sysctl isn't warranted here.
> > > Just CAP_SYS_BOOT would be enough here.
> >
> > Point taken, I'll remove sysctl knob in any further versions.
> >
> > > Also full blown panic() seems unnecessary.
> > > If the motivation is to get a memory dump then crash_kexec() helper
> > > would be more suitable.
> > > If the goal is to reboot the system then the wrapper of sys_reboot()
> > > is better.
> > > Unfortunately the cover letter lacks these details.
> >
> > The main goal is to get the memory dump, so crash_kexec() should be enough.
> > However panic() is a bit more versatile and its consequences are configurable
> > to some extent. Are there any downsides to using it?
>
> versatile? In what sense? That it does a lot more than kexec?
> That's a disadvantage.
> We should provide bpf with minimal building blocks and let
> bpf program decide what to do.
> If dmesg (that is part of panic) is useful it should be its
> own kfunc.
> If halt is necessary -> separate kfunc as well.
> reboot -> another kfunc.
>
> Also panic() is not guaranteed to do kexec and just
> panic is not what you stated is the goal of the helper.
Alright, if the aim is to provide the smallest building blocks then
crash_kexec() is a better choice.
> >
> > > Why this destructive action cannot be delegated to user space?
> >
> > Going through userspace adds delays and makes it impossible to hit "exactly
> > the right moment" thus making it unusable in most cases.
>
> What would be an example of that?
> kexec is not instant either.
With kexec at least the thread it got called in is in a proper state. I
guess it is possible to achieve this by signalling userspace to do
kexec/panic and then block the thread somehow but that won't work in a
single-cpu case. Or am I missing something?
--
Artem
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-15 12:52 ` Artem Savkov
@ 2022-07-18 21:01 ` Alexei Starovoitov
0 siblings, 0 replies; 18+ messages in thread
From: Alexei Starovoitov @ 2022-07-18 21:01 UTC (permalink / raw)
To: Alexei Starovoitov, Song Liu, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko, bpf, Networking, open list,
Andrea Arcangeli, dvacek
On Fri, Jul 15, 2022 at 5:52 AM Artem Savkov <asavkov@redhat.com> wrote:
>
> On Wed, Jul 13, 2022 at 03:20:22PM -0700, Alexei Starovoitov wrote:
> > On Wed, Jul 13, 2022 at 6:31 AM Artem Savkov <asavkov@redhat.com> wrote:
> > >
> > > On Tue, Jul 12, 2022 at 11:08:54AM -0700, Alexei Starovoitov wrote:
> > > > On Tue, Jul 12, 2022 at 10:53 AM Song Liu <song@kernel.org> wrote:
> > > > >
> > > > > >
> > > > > > +BPF_CALL_1(bpf_panic, const char *, msg)
> > > > > > +{
> > > > > > + panic(msg);
> > > > >
> > > > > I think we should also check
> > > > >
> > > > > capable(CAP_SYS_BOOT) && destructive_ebpf_enabled()
> > > > >
> > > > > here. Or at least, destructive_ebpf_enabled(). Otherwise, we
> > > > > may trigger panic after the sysctl is disabled.
> > > > >
> > > > > In general, I don't think sysctl is a good API, as it is global, and
> > > > > the user can easily forget to turn it back off. If possible, I would
> > > > > rather avoid adding new BPF related sysctls.
> > > >
> > > > +1. New sysctl isn't warranted here.
> > > > Just CAP_SYS_BOOT would be enough here.
> > >
> > > Point taken, I'll remove sysctl knob in any further versions.
> > >
> > > > Also full blown panic() seems unnecessary.
> > > > If the motivation is to get a memory dump then crash_kexec() helper
> > > > would be more suitable.
> > > > If the goal is to reboot the system then the wrapper of sys_reboot()
> > > > is better.
> > > > Unfortunately the cover letter lacks these details.
> > >
> > > The main goal is to get the memory dump, so crash_kexec() should be enough.
> > > However panic() is a bit more versatile and its consequences are configurable
> > > to some extent. Are there any downsides to using it?
> >
> > versatile? In what sense? That it does a lot more than kexec?
> > That's a disadvantage.
> > We should provide bpf with minimal building blocks and let
> > bpf program decide what to do.
> > If dmesg (that is part of panic) is useful it should be its
> > own kfunc.
> > If halt is necessary -> separate kfunc as well.
> > reboot -> another kfunc.
> >
> > Also panic() is not guaranteed to do kexec and just
> > panic is not what you stated is the goal of the helper.
>
> Alright, if the aim is to provide the smallest building blocks then
> crash_kexec() is a better choice.
>
> > >
> > > > Why this destructive action cannot be delegated to user space?
> > >
> > > Going through userspace adds delays and makes it impossible to hit "exactly
> > > the right moment" thus making it unusable in most cases.
> >
> > What would be an example of that?
> > kexec is not instant either.
>
> With kexec at least the thread it got called in is in a proper state. I
> guess it is possible to achieve this by signalling userspace to do
> kexec/panic and then block the thread somehow but that won't work in a
> single-cpu case. Or am I missing something?
Something like this.
We can extend bpf_send_signal to send a signal to pid 1
or another user agent.
It's still not clear to me why you want that memory dump.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
2022-07-11 8:32 ` [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper Artem Savkov
2022-07-11 10:42 ` Jiri Olsa
2022-07-12 17:53 ` Song Liu
@ 2022-07-14 17:03 ` kernel test robot
2 siblings, 0 replies; 18+ messages in thread
From: kernel test robot @ 2022-07-14 17:03 UTC (permalink / raw)
To: Artem Savkov; +Cc: llvm, kbuild-all
Hi Artem,
[FYI, it's a private test report for your RFC patch.]
[auto build test ERROR on bpf/master]
[also build test ERROR on net/master linus/master v5.19-rc6]
[cannot apply to bpf-next/master net-next/master next-20220714]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Artem-Savkov/bpf_panic-helper/20220711-163442
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git master
config: arm64-buildonly-randconfig-r006-20220714 (https://download.01.org/0day-ci/archive/20220715/202207150009.BPNTmC1o-lkp@intel.com/config)
compiler: clang version 15.0.0 (https://github.com/llvm/llvm-project 5e61b9c556267086ef9b743a0b57df302eef831b)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install arm64 cross compiling tool for clang build
# apt-get install binutils-aarch64-linux-gnu
# https://github.com/intel-lab-lkp/linux/commit/8aba500c3f61dcf538c8458a34f90f81279269a2
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Artem-Savkov/bpf_panic-helper/20220711-163442
git checkout 8aba500c3f61dcf538c8458a34f90f81279269a2
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm64 SHELL=/bin/bash
If you fix the issue, kindly add following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
>> kernel/bpf/verifier.c:7266:3: error: expected expression
struct bpf_prog_aux *aux = env->prog->aux;
^
>> kernel/bpf/verifier.c:7267:8: error: use of undeclared identifier 'aux'
if (!aux->destructive) {
^
2 errors generated.
vim +7266 kernel/bpf/verifier.c
7102
7103 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7104 int *insn_idx_p)
7105 {
7106 const struct bpf_func_proto *fn = NULL;
7107 enum bpf_return_type ret_type;
7108 enum bpf_type_flag ret_flag;
7109 struct bpf_reg_state *regs;
7110 struct bpf_call_arg_meta meta;
7111 int insn_idx = *insn_idx_p;
7112 bool changes_data;
7113 int i, err, func_id;
7114
7115 /* find function prototype */
7116 func_id = insn->imm;
7117 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
7118 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
7119 func_id);
7120 return -EINVAL;
7121 }
7122
7123 if (env->ops->get_func_proto)
7124 fn = env->ops->get_func_proto(func_id, env->prog);
7125 if (!fn) {
7126 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
7127 func_id);
7128 return -EINVAL;
7129 }
7130
7131 /* eBPF programs must be GPL compatible to use GPL-ed functions */
7132 if (!env->prog->gpl_compatible && fn->gpl_only) {
7133 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
7134 return -EINVAL;
7135 }
7136
7137 if (fn->allowed && !fn->allowed(env->prog)) {
7138 verbose(env, "helper call is not allowed in probe\n");
7139 return -EINVAL;
7140 }
7141
7142 /* With LD_ABS/IND some JITs save/restore skb from r1. */
7143 changes_data = bpf_helper_changes_pkt_data(fn->func);
7144 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
7145 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
7146 func_id_name(func_id), func_id);
7147 return -EINVAL;
7148 }
7149
7150 memset(&meta, 0, sizeof(meta));
7151 meta.pkt_access = fn->pkt_access;
7152
7153 err = check_func_proto(fn, func_id, &meta);
7154 if (err) {
7155 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
7156 func_id_name(func_id), func_id);
7157 return err;
7158 }
7159
7160 meta.func_id = func_id;
7161 /* check args */
7162 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7163 err = check_func_arg(env, i, &meta, fn);
7164 if (err)
7165 return err;
7166 }
7167
7168 err = record_func_map(env, &meta, func_id, insn_idx);
7169 if (err)
7170 return err;
7171
7172 err = record_func_key(env, &meta, func_id, insn_idx);
7173 if (err)
7174 return err;
7175
7176 /* Mark slots with STACK_MISC in case of raw mode, stack offset
7177 * is inferred from register state.
7178 */
7179 for (i = 0; i < meta.access_size; i++) {
7180 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
7181 BPF_WRITE, -1, false);
7182 if (err)
7183 return err;
7184 }
7185
7186 regs = cur_regs(env);
7187
7188 if (meta.uninit_dynptr_regno) {
7189 /* we write BPF_DW bits (8 bytes) at a time */
7190 for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
7191 err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
7192 i, BPF_DW, BPF_WRITE, -1, false);
7193 if (err)
7194 return err;
7195 }
7196
7197 err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
7198 fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
7199 insn_idx);
7200 if (err)
7201 return err;
7202 }
7203
7204 if (meta.release_regno) {
7205 err = -EINVAL;
7206 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1]))
7207 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
7208 else if (meta.ref_obj_id)
7209 err = release_reference(env, meta.ref_obj_id);
7210 /* meta.ref_obj_id can only be 0 if register that is meant to be
7211 * released is NULL, which must be > R0.
7212 */
7213 else if (register_is_null(&regs[meta.release_regno]))
7214 err = 0;
7215 if (err) {
7216 verbose(env, "func %s#%d reference has not been acquired before\n",
7217 func_id_name(func_id), func_id);
7218 return err;
7219 }
7220 }
7221
7222 switch (func_id) {
7223 case BPF_FUNC_tail_call:
7224 err = check_reference_leak(env);
7225 if (err) {
7226 verbose(env, "tail_call would lead to reference leak\n");
7227 return err;
7228 }
7229 break;
7230 case BPF_FUNC_get_local_storage:
7231 /* check that flags argument in get_local_storage(map, flags) is 0,
7232 * this is required because get_local_storage() can't return an error.
7233 */
7234 if (!register_is_null(&regs[BPF_REG_2])) {
7235 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
7236 return -EINVAL;
7237 }
7238 break;
7239 case BPF_FUNC_for_each_map_elem:
7240 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7241 set_map_elem_callback_state);
7242 break;
7243 case BPF_FUNC_timer_set_callback:
7244 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7245 set_timer_callback_state);
7246 break;
7247 case BPF_FUNC_find_vma:
7248 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7249 set_find_vma_callback_state);
7250 break;
7251 case BPF_FUNC_snprintf:
7252 err = check_bpf_snprintf_call(env, regs);
7253 break;
7254 case BPF_FUNC_loop:
7255 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7256 set_loop_callback_state);
7257 break;
7258 case BPF_FUNC_dynptr_from_mem:
7259 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
7260 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
7261 reg_type_str(env, regs[BPF_REG_1].type));
7262 return -EACCES;
7263 }
7264 break;
7265 case BPF_FUNC_panic:
> 7266 struct bpf_prog_aux *aux = env->prog->aux;
> 7267 if (!aux->destructive) {
7268 verbose(env, "bpf_panic() calls require BPF_F_DESTRUCTIVE flag\n");
7269 return -EACCES;
7270 }
7271 }
7272
7273 if (err)
7274 return err;
7275
7276 /* reset caller saved regs */
7277 for (i = 0; i < CALLER_SAVED_REGS; i++) {
7278 mark_reg_not_init(env, regs, caller_saved[i]);
7279 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7280 }
7281
7282 /* helper call returns 64-bit value. */
7283 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7284
7285 /* update return register (already marked as written above) */
7286 ret_type = fn->ret_type;
7287 ret_flag = type_flag(fn->ret_type);
7288 if (ret_type == RET_INTEGER) {
7289 /* sets type to SCALAR_VALUE */
7290 mark_reg_unknown(env, regs, BPF_REG_0);
7291 } else if (ret_type == RET_VOID) {
7292 regs[BPF_REG_0].type = NOT_INIT;
7293 } else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
7294 /* There is no offset yet applied, variable or fixed */
7295 mark_reg_known_zero(env, regs, BPF_REG_0);
7296 /* remember map_ptr, so that check_map_access()
7297 * can check 'value_size' boundary of memory access
7298 * to map element returned from bpf_map_lookup_elem()
7299 */
7300 if (meta.map_ptr == NULL) {
7301 verbose(env,
7302 "kernel subsystem misconfigured verifier\n");
7303 return -EINVAL;
7304 }
7305 regs[BPF_REG_0].map_ptr = meta.map_ptr;
7306 regs[BPF_REG_0].map_uid = meta.map_uid;
7307 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
7308 if (!type_may_be_null(ret_type) &&
7309 map_value_has_spin_lock(meta.map_ptr)) {
7310 regs[BPF_REG_0].id = ++env->id_gen;
7311 }
7312 } else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
7313 mark_reg_known_zero(env, regs, BPF_REG_0);
7314 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
7315 } else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
7316 mark_reg_known_zero(env, regs, BPF_REG_0);
7317 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
7318 } else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
7319 mark_reg_known_zero(env, regs, BPF_REG_0);
7320 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
7321 } else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
7322 mark_reg_known_zero(env, regs, BPF_REG_0);
7323 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7324 regs[BPF_REG_0].mem_size = meta.mem_size;
7325 } else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
7326 const struct btf_type *t;
7327
7328 mark_reg_known_zero(env, regs, BPF_REG_0);
7329 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
7330 if (!btf_type_is_struct(t)) {
7331 u32 tsize;
7332 const struct btf_type *ret;
7333 const char *tname;
7334
7335 /* resolve the type size of ksym. */
7336 ret = btf_resolve_size(meta.ret_btf, t, &tsize);
7337 if (IS_ERR(ret)) {
7338 tname = btf_name_by_offset(meta.ret_btf, t->name_off);
7339 verbose(env, "unable to resolve the size of type '%s': %ld\n",
7340 tname, PTR_ERR(ret));
7341 return -EINVAL;
7342 }
7343 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
7344 regs[BPF_REG_0].mem_size = tsize;
7345 } else {
7346 /* MEM_RDONLY may be carried from ret_flag, but it
7347 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
7348 * it will confuse the check of PTR_TO_BTF_ID in
7349 * check_mem_access().
7350 */
7351 ret_flag &= ~MEM_RDONLY;
7352
7353 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7354 regs[BPF_REG_0].btf = meta.ret_btf;
7355 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
7356 }
7357 } else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
7358 struct btf *ret_btf;
7359 int ret_btf_id;
7360
7361 mark_reg_known_zero(env, regs, BPF_REG_0);
7362 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
7363 if (func_id == BPF_FUNC_kptr_xchg) {
7364 ret_btf = meta.kptr_off_desc->kptr.btf;
7365 ret_btf_id = meta.kptr_off_desc->kptr.btf_id;
7366 } else {
7367 ret_btf = btf_vmlinux;
7368 ret_btf_id = *fn->ret_btf_id;
7369 }
7370 if (ret_btf_id == 0) {
7371 verbose(env, "invalid return type %u of func %s#%d\n",
7372 base_type(ret_type), func_id_name(func_id),
7373 func_id);
7374 return -EINVAL;
7375 }
7376 regs[BPF_REG_0].btf = ret_btf;
7377 regs[BPF_REG_0].btf_id = ret_btf_id;
7378 } else {
7379 verbose(env, "unknown return type %u of func %s#%d\n",
7380 base_type(ret_type), func_id_name(func_id), func_id);
7381 return -EINVAL;
7382 }
7383
7384 if (type_may_be_null(regs[BPF_REG_0].type))
7385 regs[BPF_REG_0].id = ++env->id_gen;
7386
7387 if (is_ptr_cast_function(func_id)) {
7388 /* For release_reference() */
7389 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
7390 } else if (is_acquire_function(func_id, meta.map_ptr)) {
7391 int id = acquire_reference_state(env, insn_idx);
7392
7393 if (id < 0)
7394 return id;
7395 /* For mark_ptr_or_null_reg() */
7396 regs[BPF_REG_0].id = id;
7397 /* For release_reference() */
7398 regs[BPF_REG_0].ref_obj_id = id;
7399 } else if (func_id == BPF_FUNC_dynptr_data) {
7400 int dynptr_id = 0, i;
7401
7402 /* Find the id of the dynptr we're acquiring a reference to */
7403 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7404 if (arg_type_is_dynptr(fn->arg_type[i])) {
7405 if (dynptr_id) {
7406 verbose(env, "verifier internal error: multiple dynptr args in func\n");
7407 return -EFAULT;
7408 }
7409 dynptr_id = stack_slot_get_id(env, &regs[BPF_REG_1 + i]);
7410 }
7411 }
7412 /* For release_reference() */
7413 regs[BPF_REG_0].ref_obj_id = dynptr_id;
7414 }
7415
7416 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
7417
7418 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
7419 if (err)
7420 return err;
7421
7422 if ((func_id == BPF_FUNC_get_stack ||
7423 func_id == BPF_FUNC_get_task_stack) &&
7424 !env->prog->has_callchain_buf) {
7425 const char *err_str;
7426
--
0-DAY CI Kernel Test Service
https://01.org/lkp
^ permalink raw reply [flat|nested] 18+ messages in thread
* [RFC PATCH bpf-next 4/4] selftests/bpf: bpf_panic selftest
2022-07-11 8:32 [RFC PATCH bpf-next 0/4] bpf_panic() helper Artem Savkov
` (2 preceding siblings ...)
2022-07-11 8:32 ` [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper Artem Savkov
@ 2022-07-11 8:32 ` Artem Savkov
2022-07-11 10:51 ` [RFC PATCH bpf-next 0/4] bpf_panic() helper Jiri Olsa
4 siblings, 0 replies; 18+ messages in thread
From: Artem Savkov @ 2022-07-11 8:32 UTC (permalink / raw)
To: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf, netdev
Cc: linux-kernel, Andrea Arcangeli, Artem Savkov
Add a selftest for bpf_panic() checking that the program will only load
if all the prerequisites are met.
Signed-off-by: Artem Savkov <asavkov@redhat.com>
---
.../selftests/bpf/prog_tests/bpf_panic.c | 144 ++++++++++++++++++
1 file changed, 144 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_panic.c
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_panic.c b/tools/testing/selftests/bpf/prog_tests/bpf_panic.c
new file mode 100644
index 000000000000..9d008c0a5140
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_panic.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Red Hat, Inc. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "cap_helpers.h"
+
+static int sysctl_get(const char *sysctl_path, char *old_val)
+{
+ int ret = 0;
+ FILE *fp;
+
+ fp = fopen(sysctl_path, "r");
+ if (!fp)
+ return -errno;
+
+ if (fscanf(fp, "%s", old_val) <= 0)
+ ret = -ENOENT;
+
+ fclose(fp);
+
+ return ret;
+}
+
+static int sysctl_set(const char *sysctl_path, const char *new_val)
+{
+ int ret = 0;
+ FILE *fp;
+
+ fp = fopen(sysctl_path, "w");
+ if (!fp)
+ return -errno;
+
+ if (fprintf(fp, "%s", new_val) < 0)
+ ret = -errno;
+
+ fclose(fp);
+
+ return ret;
+}
+
+static char bpf_vlog[UINT_MAX >> 8];
+
+static void test_bpf_panic_conditions(void)
+{
+ int fd_prog;
+ int map_fd;
+ struct bpf_insn prog_insns[] = {
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_panic),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
+ LIBBPF_OPTS(bpf_map_create_opts, map_create_opts);
+ int attach_btf_id;
+ __u64 save_caps = 0;
+
+ if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/destructive_bpf_enabled",
+ "1"), "set destructive_bpf_enabled"))
+ return;
+
+ load_opts.log_level = 1;
+ load_opts.log_buf = bpf_vlog;
+ load_opts.log_size = sizeof(bpf_vlog);
+ load_opts.expected_attach_type = BPF_TRACE_FENTRY;
+
+ attach_btf_id = libbpf_find_vmlinux_btf_id("dentry_open", load_opts.expected_attach_type);
+ if (!ASSERT_GE(attach_btf_id, 0, "attach_btf_id"))
+ return;
+
+ load_opts.attach_btf_id = attach_btf_id;
+
+ map_create_opts.map_flags = BPF_F_RDONLY_PROG;
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, &map_create_opts);
+ if (!ASSERT_GE(map_fd, 0, "bpf_map_create"))
+ return;
+ ASSERT_OK(bpf_map_freeze(map_fd), "bpf_map_freeze");
+
+ prog_insns[0].imm = map_fd;
+
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_TRACING, "bpf_panic", "GPL", prog_insns, prog_insn_cnt, &load_opts);
+
+ if (ASSERT_EQ(fd_prog, -EACCES, "BPF_F_DESTRUCTIVE required")) {
+ if (!ASSERT_OK_PTR(
+ strstr(bpf_vlog, "require BPF_F_DESTRUCTIVE"),
+ "BPF_F_DESTRUCTIVE verifier log")) {
+ printf("verifier log:\n%s\n", bpf_vlog);
+ }
+ }
+
+ load_opts.prog_flags = BPF_F_DESTRUCTIVE;
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_TRACING, "bpf_panic", "GPL", prog_insns, prog_insn_cnt, &load_opts);
+
+ if (ASSERT_GE(fd_prog, 0, "successful load")) {
+ close(fd_prog);
+ } else {
+ printf("verifier log:\n%s\n", bpf_vlog);
+ }
+
+
+ if (ASSERT_OK(cap_disable_effective(1ULL << CAP_SYS_BOOT, &save_caps), "disable caps")) {
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_TRACING, "bpf_panic", "GPL", prog_insns, prog_insn_cnt, &load_opts);
+ ASSERT_EQ(fd_prog, -EINVAL, "CAP_SYS_BOOT required");
+ if (!ASSERT_OK_PTR(
+ strstr(bpf_vlog, "unknown func bpf_panic"),
+ "CAP_SYS_BOOT verifier log")) {
+ printf("verifier log:\n%s\n", bpf_vlog);
+ }
+ cap_enable_effective(save_caps, NULL);
+ }
+
+ if (ASSERT_OK(sysctl_set("/proc/sys/kernel/destructive_bpf_enabled",
+ "0"), "unset destructive_bpf_enabled")) {
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_TRACING, "bpf_panic", "GPL", prog_insns, prog_insn_cnt, &load_opts);
+ ASSERT_EQ(fd_prog, -EINVAL, "!destructive_bpf_enabled");
+ if (!ASSERT_OK_PTR(
+ strstr(bpf_vlog, "unknown func bpf_panic"),
+ "!destructive_bpf_enabled verifier log")) {
+ printf("verifier log:\n%s\n", bpf_vlog);
+ }
+ }
+ close(map_fd);
+}
+
+void test_bpf_panic(void)
+{
+ char destructive_bpf_enabled_orig[32] = {};
+
+ if (!ASSERT_OK(sysctl_get("/proc/sys/kernel/destructive_bpf_enabled",
+ destructive_bpf_enabled_orig), "read destructive_bpf_enabled"))
+ goto cleanup;
+
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+ if (test__start_subtest("bpf_panic_conditions"))
+ test_bpf_panic_conditions();
+
+cleanup:
+ if (strlen(destructive_bpf_enabled_orig) > 0)
+ sysctl_set("/proc/sys/kernel/destructive_bpf_enabled",
+ destructive_bpf_enabled_orig);
+}
--
2.35.3
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 0/4] bpf_panic() helper
2022-07-11 8:32 [RFC PATCH bpf-next 0/4] bpf_panic() helper Artem Savkov
` (3 preceding siblings ...)
2022-07-11 8:32 ` [RFC PATCH bpf-next 4/4] selftests/bpf: bpf_panic selftest Artem Savkov
@ 2022-07-11 10:51 ` Jiri Olsa
2022-08-01 13:58 ` Daniel Vacek
4 siblings, 1 reply; 18+ messages in thread
From: Jiri Olsa @ 2022-07-11 10:51 UTC (permalink / raw)
To: Artem Savkov
Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko, bpf,
netdev, linux-kernel, Andrea Arcangeli, dvacek
On Mon, Jul 11, 2022 at 10:32:16AM +0200, Artem Savkov wrote:
> eBPF is often used for kernel debugging, and one of the widely used and
> powerful debugging techniques is post-mortem debugging with a full memory dump.
> Triggering a panic at exactly the right moment allows the user to get such a
> dump and thus a better view at the system's state. This patchset adds
> bpf_panic() helper to do exactly that.
FWIW I was asked for such helper some time ago from Daniel Vacek, cc-ed
jirka
>
> I realize that even though there are multiple guards present, a helper like
> this is contrary to BPF being "safe", so this is sent as RFC to have a
> discussion on whether adding destructive capabilities is deemed acceptable.
>
> Artem Savkov (4):
> bpf: add a sysctl to enable destructive bpf helpers
> bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD
> bpf: add bpf_panic() helper
> selftests/bpf: bpf_panic selftest
>
> include/linux/bpf.h | 8 +
> include/uapi/linux/bpf.h | 13 ++
> kernel/bpf/core.c | 1 +
> kernel/bpf/helpers.c | 13 ++
> kernel/bpf/syscall.c | 33 +++-
> kernel/bpf/verifier.c | 7 +
> kernel/trace/bpf_trace.c | 2 +
> tools/include/uapi/linux/bpf.h | 13 ++
> .../selftests/bpf/prog_tests/bpf_panic.c | 144 ++++++++++++++++++
> 9 files changed, 233 insertions(+), 1 deletion(-)
> create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_panic.c
>
> --
> 2.35.3
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 0/4] bpf_panic() helper
2022-07-11 10:51 ` [RFC PATCH bpf-next 0/4] bpf_panic() helper Jiri Olsa
@ 2022-08-01 13:58 ` Daniel Vacek
0 siblings, 0 replies; 18+ messages in thread
From: Daniel Vacek @ 2022-08-01 13:58 UTC (permalink / raw)
To: Jiri Olsa
Cc: Artem Savkov, Alexei Starovoitov, Daniel Borkmann,
Andrii Nakryiko, bpf, netdev, linux-kernel, Andrea Arcangeli
On Mon, Jul 11, 2022 at 12:51 PM Jiri Olsa <olsajiri@gmail.com> wrote:
>
> On Mon, Jul 11, 2022 at 10:32:16AM +0200, Artem Savkov wrote:
> > eBPF is often used for kernel debugging, and one of the widely used and
> > powerful debugging techniques is post-mortem debugging with a full memory dump.
> > Triggering a panic at exactly the right moment allows the user to get such a
> > dump and thus a better view at the system's state. This patchset adds
> > bpf_panic() helper to do exactly that.
>
> FWIW I was asked for such helper some time ago from Daniel Vacek, cc-ed
Nice :-)
This is totally welcome. Though, IIRC, I was asking if I could do a
NULL pointer dereference within perf probe (or ftrace) back then.
Still, the outcome is similar. So kudos to Artem.
--nX
> jirka
>
> >
> > I realize that even though there are multiple guards present, a helper like
> > this is contrary to BPF being "safe", so this is sent as RFC to have a
> > discussion on whether adding destructive capabilities is deemed acceptable.
> >
> > Artem Savkov (4):
> > bpf: add a sysctl to enable destructive bpf helpers
> > bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD
> > bpf: add bpf_panic() helper
> > selftests/bpf: bpf_panic selftest
> >
> > include/linux/bpf.h | 8 +
> > include/uapi/linux/bpf.h | 13 ++
> > kernel/bpf/core.c | 1 +
> > kernel/bpf/helpers.c | 13 ++
> > kernel/bpf/syscall.c | 33 +++-
> > kernel/bpf/verifier.c | 7 +
> > kernel/trace/bpf_trace.c | 2 +
> > tools/include/uapi/linux/bpf.h | 13 ++
> > .../selftests/bpf/prog_tests/bpf_panic.c | 144 ++++++++++++++++++
> > 9 files changed, 233 insertions(+), 1 deletion(-)
> > create mode 100644 tools/testing/selftests/bpf/prog_tests/bpf_panic.c
> >
> > --
> > 2.35.3
> >
>
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper
@ 2022-07-14 20:07 kernel test robot
0 siblings, 0 replies; 18+ messages in thread
From: kernel test robot @ 2022-07-14 20:07 UTC (permalink / raw)
To: kbuild
[-- Attachment #1: Type: text/plain, Size: 31433 bytes --]
::::::
:::::: Manual check reason: "low confidence static check warning: kernel/bpf/verifier.c:7266:17: sparse: sparse: typename in expression"
::::::
CC: kbuild-all(a)lists.01.org
BCC: lkp(a)intel.com
In-Reply-To: <20220711083220.2175036-4-asavkov@redhat.com>
References: <20220711083220.2175036-4-asavkov@redhat.com>
TO: Artem Savkov <asavkov@redhat.com>
Hi Artem,
[FYI, it's a private test report for your RFC patch.]
[auto build test WARNING on bpf/master]
[also build test WARNING on net/master linus/master v5.19-rc6]
[cannot apply to bpf-next/master horms-ipvs/master net-next/master next-20220714]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Artem-Savkov/bpf_panic-helper/20220711-163442
base: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git master
:::::: branch date: 3 days ago
:::::: commit date: 3 days ago
config: i386-randconfig-s003 (https://download.01.org/0day-ci/archive/20220715/202207150346.TnA47mD1-lkp(a)intel.com/config)
compiler: gcc-11 (Debian 11.3.0-3) 11.3.0
reproduce:
# apt-get install sparse
# sparse version: v0.6.4-39-gce1a6720-dirty
# https://github.com/intel-lab-lkp/linux/commit/8aba500c3f61dcf538c8458a34f90f81279269a2
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Artem-Savkov/bpf_panic-helper/20220711-163442
git checkout 8aba500c3f61dcf538c8458a34f90f81279269a2
# save the config file
mkdir build_dir && cp config build_dir/.config
make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' O=build_dir ARCH=i386 SHELL=/bin/bash ./ kernel/bpf/
If you fix the issue, kindly add following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
>> kernel/bpf/verifier.c:7266:17: sparse: sparse: typename in expression
kernel/bpf/verifier.c:7266:24: sparse: sparse: Expected ; at end of statement
kernel/bpf/verifier.c:7266:24: sparse: sparse: got bpf_prog_aux
kernel/bpf/verifier.c:7266:17: sparse: sparse: undefined identifier 'struct'
kernel/bpf/verifier.c:7267:22: sparse: sparse: undefined identifier 'aux'
kernel/bpf/verifier.c:14260:38: sparse: sparse: subtraction of functions? Share your drugs
vim +7266 kernel/bpf/verifier.c
9b99edcae5c80c Jiri Olsa 2021-07-14 7102
69c087ba6225b5 Yonghong Song 2021-02-26 7103 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
69c087ba6225b5 Yonghong Song 2021-02-26 7104 int *insn_idx_p)
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7105 {
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7106 const struct bpf_func_proto *fn = NULL;
3c4807322660d4 Hao Luo 2021-12-16 7107 enum bpf_return_type ret_type;
c25b2ae136039f Hao Luo 2021-12-16 7108 enum bpf_type_flag ret_flag;
638f5b90d46016 Alexei Starovoitov 2017-10-31 7109 struct bpf_reg_state *regs;
33ff9823c569f3 Daniel Borkmann 2016-04-13 7110 struct bpf_call_arg_meta meta;
69c087ba6225b5 Yonghong Song 2021-02-26 7111 int insn_idx = *insn_idx_p;
969bf05eb3cedd Alexei Starovoitov 2016-05-05 7112 bool changes_data;
69c087ba6225b5 Yonghong Song 2021-02-26 7113 int i, err, func_id;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7114
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7115 /* find function prototype */
69c087ba6225b5 Yonghong Song 2021-02-26 7116 func_id = insn->imm;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7117 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218eef349 Jakub Kicinski 2017-10-09 7118 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
61bd5218eef349 Jakub Kicinski 2017-10-09 7119 func_id);
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7120 return -EINVAL;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7121 }
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7122
00176a34d9e27a Jakub Kicinski 2017-10-16 7123 if (env->ops->get_func_proto)
5e43f899b03a34 Andrey Ignatov 2018-03-30 7124 fn = env->ops->get_func_proto(func_id, env->prog);
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7125 if (!fn) {
61bd5218eef349 Jakub Kicinski 2017-10-09 7126 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
61bd5218eef349 Jakub Kicinski 2017-10-09 7127 func_id);
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7128 return -EINVAL;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7129 }
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7130
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7131 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ecea76b0b Daniel Borkmann 2015-03-01 7132 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867cdf088f Daniel Borkmann 2018-06-02 7133 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7134 return -EINVAL;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7135 }
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7136
eae2e83e62633a Jiri Olsa 2020-08-25 7137 if (fn->allowed && !fn->allowed(env->prog)) {
eae2e83e62633a Jiri Olsa 2020-08-25 7138 verbose(env, "helper call is not allowed in probe\n");
eae2e83e62633a Jiri Olsa 2020-08-25 7139 return -EINVAL;
eae2e83e62633a Jiri Olsa 2020-08-25 7140 }
eae2e83e62633a Jiri Olsa 2020-08-25 7141
04514d13222f2c Daniel Borkmann 2017-12-14 7142 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2723145 Martin KaFai Lau 2016-12-07 7143 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13222f2c Daniel Borkmann 2017-12-14 7144 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
04514d13222f2c Daniel Borkmann 2017-12-14 7145 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
04514d13222f2c Daniel Borkmann 2017-12-14 7146 func_id_name(func_id), func_id);
04514d13222f2c Daniel Borkmann 2017-12-14 7147 return -EINVAL;
04514d13222f2c Daniel Borkmann 2017-12-14 7148 }
969bf05eb3cedd Alexei Starovoitov 2016-05-05 7149
33ff9823c569f3 Daniel Borkmann 2016-04-13 7150 memset(&meta, 0, sizeof(meta));
36bbef52c7eb64 Daniel Borkmann 2016-09-20 7151 meta.pkt_access = fn->pkt_access;
33ff9823c569f3 Daniel Borkmann 2016-04-13 7152
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7153 err = check_func_proto(fn, func_id, &meta);
435faee1aae9c1 Daniel Borkmann 2016-04-13 7154 if (err) {
61bd5218eef349 Jakub Kicinski 2017-10-09 7155 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676daa1a340 Thomas Graf 2016-10-27 7156 func_id_name(func_id), func_id);
435faee1aae9c1 Daniel Borkmann 2016-04-13 7157 return err;
435faee1aae9c1 Daniel Borkmann 2016-04-13 7158 }
435faee1aae9c1 Daniel Borkmann 2016-04-13 7159
d83525ca62cf8e Alexei Starovoitov 2019-01-31 7160 meta.func_id = func_id;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7161 /* check args */
523a4cf491b3c9 Dmitrii Banshchikov 2021-02-26 7162 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
af7ec13833619e Yonghong Song 2020-06-23 7163 err = check_func_arg(env, i, &meta, fn);
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7164 if (err)
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7165 return err;
a7658e1a4164ce Alexei Starovoitov 2019-10-15 7166 }
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7167
c93552c443ebc6 Daniel Borkmann 2018-05-24 7168 err = record_func_map(env, &meta, func_id, insn_idx);
c93552c443ebc6 Daniel Borkmann 2018-05-24 7169 if (err)
c93552c443ebc6 Daniel Borkmann 2018-05-24 7170 return err;
c93552c443ebc6 Daniel Borkmann 2018-05-24 7171
d2e4c1e6c29472 Daniel Borkmann 2019-11-22 7172 err = record_func_key(env, &meta, func_id, insn_idx);
d2e4c1e6c29472 Daniel Borkmann 2019-11-22 7173 if (err)
d2e4c1e6c29472 Daniel Borkmann 2019-11-22 7174 return err;
d2e4c1e6c29472 Daniel Borkmann 2019-11-22 7175
435faee1aae9c1 Daniel Borkmann 2016-04-13 7176 /* Mark slots with STACK_MISC in case of raw mode, stack offset
435faee1aae9c1 Daniel Borkmann 2016-04-13 7177 * is inferred from register state.
435faee1aae9c1 Daniel Borkmann 2016-04-13 7178 */
435faee1aae9c1 Daniel Borkmann 2016-04-13 7179 for (i = 0; i < meta.access_size; i++) {
ca36960211eb22 Daniel Borkmann 2018-02-23 7180 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
ca36960211eb22 Daniel Borkmann 2018-02-23 7181 BPF_WRITE, -1, false);
435faee1aae9c1 Daniel Borkmann 2016-04-13 7182 if (err)
435faee1aae9c1 Daniel Borkmann 2016-04-13 7183 return err;
435faee1aae9c1 Daniel Borkmann 2016-04-13 7184 }
435faee1aae9c1 Daniel Borkmann 2016-04-13 7185
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7186 regs = cur_regs(env);
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7187
97e03f521050c0 Joanne Koong 2022-05-23 7188 if (meta.uninit_dynptr_regno) {
97e03f521050c0 Joanne Koong 2022-05-23 7189 /* we write BPF_DW bits (8 bytes) at a time */
97e03f521050c0 Joanne Koong 2022-05-23 7190 for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
97e03f521050c0 Joanne Koong 2022-05-23 7191 err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
97e03f521050c0 Joanne Koong 2022-05-23 7192 i, BPF_DW, BPF_WRITE, -1, false);
97e03f521050c0 Joanne Koong 2022-05-23 7193 if (err)
97e03f521050c0 Joanne Koong 2022-05-23 7194 return err;
97e03f521050c0 Joanne Koong 2022-05-23 7195 }
97e03f521050c0 Joanne Koong 2022-05-23 7196
97e03f521050c0 Joanne Koong 2022-05-23 7197 err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
97e03f521050c0 Joanne Koong 2022-05-23 7198 fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
97e03f521050c0 Joanne Koong 2022-05-23 7199 insn_idx);
97e03f521050c0 Joanne Koong 2022-05-23 7200 if (err)
97e03f521050c0 Joanne Koong 2022-05-23 7201 return err;
97e03f521050c0 Joanne Koong 2022-05-23 7202 }
97e03f521050c0 Joanne Koong 2022-05-23 7203
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7204 if (meta.release_regno) {
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7205 err = -EINVAL;
97e03f521050c0 Joanne Koong 2022-05-23 7206 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1]))
97e03f521050c0 Joanne Koong 2022-05-23 7207 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
97e03f521050c0 Joanne Koong 2022-05-23 7208 else if (meta.ref_obj_id)
1b986589680a2a Martin KaFai Lau 2019-03-12 7209 err = release_reference(env, meta.ref_obj_id);
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7210 /* meta.ref_obj_id can only be 0 if register that is meant to be
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7211 * released is NULL, which must be > R0.
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7212 */
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7213 else if (register_is_null(&regs[meta.release_regno]))
8f14852e89113d Kumar Kartikeya Dwivedi 2022-04-25 7214 err = 0;
46f8bc92758c62 Martin KaFai Lau 2019-02-09 7215 if (err) {
46f8bc92758c62 Martin KaFai Lau 2019-02-09 7216 verbose(env, "func %s#%d reference has not been acquired before\n",
46f8bc92758c62 Martin KaFai Lau 2019-02-09 7217 func_id_name(func_id), func_id);
fd978bf7fd3125 Joe Stringer 2018-10-02 7218 return err;
fd978bf7fd3125 Joe Stringer 2018-10-02 7219 }
46f8bc92758c62 Martin KaFai Lau 2019-02-09 7220 }
fd978bf7fd3125 Joe Stringer 2018-10-02 7221
e6f2dd0f80674e Joanne Koong 2021-11-29 7222 switch (func_id) {
e6f2dd0f80674e Joanne Koong 2021-11-29 7223 case BPF_FUNC_tail_call:
e6f2dd0f80674e Joanne Koong 2021-11-29 7224 err = check_reference_leak(env);
e6f2dd0f80674e Joanne Koong 2021-11-29 7225 if (err) {
e6f2dd0f80674e Joanne Koong 2021-11-29 7226 verbose(env, "tail_call would lead to reference leak\n");
e6f2dd0f80674e Joanne Koong 2021-11-29 7227 return err;
e6f2dd0f80674e Joanne Koong 2021-11-29 7228 }
e6f2dd0f80674e Joanne Koong 2021-11-29 7229 break;
e6f2dd0f80674e Joanne Koong 2021-11-29 7230 case BPF_FUNC_get_local_storage:
cd339431765383 Roman Gushchin 2018-08-02 7231 /* check that flags argument in get_local_storage(map, flags) is 0,
cd339431765383 Roman Gushchin 2018-08-02 7232 * this is required because get_local_storage() can't return an error.
cd339431765383 Roman Gushchin 2018-08-02 7233 */
e6f2dd0f80674e Joanne Koong 2021-11-29 7234 if (!register_is_null(&regs[BPF_REG_2])) {
cd339431765383 Roman Gushchin 2018-08-02 7235 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
cd339431765383 Roman Gushchin 2018-08-02 7236 return -EINVAL;
cd339431765383 Roman Gushchin 2018-08-02 7237 }
e6f2dd0f80674e Joanne Koong 2021-11-29 7238 break;
e6f2dd0f80674e Joanne Koong 2021-11-29 7239 case BPF_FUNC_for_each_map_elem:
69c087ba6225b5 Yonghong Song 2021-02-26 7240 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
69c087ba6225b5 Yonghong Song 2021-02-26 7241 set_map_elem_callback_state);
e6f2dd0f80674e Joanne Koong 2021-11-29 7242 break;
e6f2dd0f80674e Joanne Koong 2021-11-29 7243 case BPF_FUNC_timer_set_callback:
b00628b1c7d595 Alexei Starovoitov 2021-07-14 7244 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
b00628b1c7d595 Alexei Starovoitov 2021-07-14 7245 set_timer_callback_state);
e6f2dd0f80674e Joanne Koong 2021-11-29 7246 break;
e6f2dd0f80674e Joanne Koong 2021-11-29 7247 case BPF_FUNC_find_vma:
7c7e3d31e7856a Song Liu 2021-11-05 7248 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7c7e3d31e7856a Song Liu 2021-11-05 7249 set_find_vma_callback_state);
e6f2dd0f80674e Joanne Koong 2021-11-29 7250 break;
e6f2dd0f80674e Joanne Koong 2021-11-29 7251 case BPF_FUNC_snprintf:
e6f2dd0f80674e Joanne Koong 2021-11-29 7252 err = check_bpf_snprintf_call(env, regs);
e6f2dd0f80674e Joanne Koong 2021-11-29 7253 break;
e6f2dd0f80674e Joanne Koong 2021-11-29 7254 case BPF_FUNC_loop:
e6f2dd0f80674e Joanne Koong 2021-11-29 7255 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
e6f2dd0f80674e Joanne Koong 2021-11-29 7256 set_loop_callback_state);
e6f2dd0f80674e Joanne Koong 2021-11-29 7257 break;
263ae152e96253 Joanne Koong 2022-05-23 7258 case BPF_FUNC_dynptr_from_mem:
263ae152e96253 Joanne Koong 2022-05-23 7259 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
263ae152e96253 Joanne Koong 2022-05-23 7260 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
263ae152e96253 Joanne Koong 2022-05-23 7261 reg_type_str(env, regs[BPF_REG_1].type));
263ae152e96253 Joanne Koong 2022-05-23 7262 return -EACCES;
263ae152e96253 Joanne Koong 2022-05-23 7263 }
8aba500c3f61dc Artem Savkov 2022-07-11 7264 break;
8aba500c3f61dc Artem Savkov 2022-07-11 7265 case BPF_FUNC_panic:
8aba500c3f61dc Artem Savkov 2022-07-11 @7266 struct bpf_prog_aux *aux = env->prog->aux;
8aba500c3f61dc Artem Savkov 2022-07-11 7267 if (!aux->destructive) {
8aba500c3f61dc Artem Savkov 2022-07-11 7268 verbose(env, "bpf_panic() calls require BPF_F_DESTRUCTIVE flag\n");
8aba500c3f61dc Artem Savkov 2022-07-11 7269 return -EACCES;
8aba500c3f61dc Artem Savkov 2022-07-11 7270 }
7c7e3d31e7856a Song Liu 2021-11-05 7271 }
7c7e3d31e7856a Song Liu 2021-11-05 7272
e6f2dd0f80674e Joanne Koong 2021-11-29 7273 if (err)
7b15523a989b63 Florent Revest 2021-04-19 7274 return err;
7b15523a989b63 Florent Revest 2021-04-19 7275
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7276 /* reset caller saved regs */
dc503a8ad98474 Edward Cree 2017-08-15 7277 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218eef349 Jakub Kicinski 2017-10-09 7278 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8ad98474 Edward Cree 2017-08-15 7279 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
dc503a8ad98474 Edward Cree 2017-08-15 7280 }
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7281
5327ed3d44b754 Jiong Wang 2019-05-24 7282 /* helper call returns 64-bit value. */
5327ed3d44b754 Jiong Wang 2019-05-24 7283 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
5327ed3d44b754 Jiong Wang 2019-05-24 7284
dc503a8ad98474 Edward Cree 2017-08-15 7285 /* update return register (already marked as written above) */
3c4807322660d4 Hao Luo 2021-12-16 7286 ret_type = fn->ret_type;
c25b2ae136039f Hao Luo 2021-12-16 7287 ret_flag = type_flag(fn->ret_type);
3c4807322660d4 Hao Luo 2021-12-16 7288 if (ret_type == RET_INTEGER) {
f1174f77b50c94 Edward Cree 2017-08-07 7289 /* sets type to SCALAR_VALUE */
61bd5218eef349 Jakub Kicinski 2017-10-09 7290 mark_reg_unknown(env, regs, BPF_REG_0);
3c4807322660d4 Hao Luo 2021-12-16 7291 } else if (ret_type == RET_VOID) {
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7292 regs[BPF_REG_0].type = NOT_INIT;
3c4807322660d4 Hao Luo 2021-12-16 7293 } else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
f1174f77b50c94 Edward Cree 2017-08-07 7294 /* There is no offset yet applied, variable or fixed */
61bd5218eef349 Jakub Kicinski 2017-10-09 7295 mark_reg_known_zero(env, regs, BPF_REG_0);
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7296 /* remember map_ptr, so that check_map_access()
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7297 * can check 'value_size' boundary of memory access
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7298 * to map element returned from bpf_map_lookup_elem()
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7299 */
33ff9823c569f3 Daniel Borkmann 2016-04-13 7300 if (meta.map_ptr == NULL) {
61bd5218eef349 Jakub Kicinski 2017-10-09 7301 verbose(env,
61bd5218eef349 Jakub Kicinski 2017-10-09 7302 "kernel subsystem misconfigured verifier\n");
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7303 return -EINVAL;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7304 }
33ff9823c569f3 Daniel Borkmann 2016-04-13 7305 regs[BPF_REG_0].map_ptr = meta.map_ptr;
3e8ce29850f183 Alexei Starovoitov 2021-07-14 7306 regs[BPF_REG_0].map_uid = meta.map_uid;
c25b2ae136039f Hao Luo 2021-12-16 7307 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
c25b2ae136039f Hao Luo 2021-12-16 7308 if (!type_may_be_null(ret_type) &&
c25b2ae136039f Hao Luo 2021-12-16 7309 map_value_has_spin_lock(meta.map_ptr)) {
e16d2f1ab96849 Alexei Starovoitov 2019-01-31 7310 regs[BPF_REG_0].id = ++env->id_gen;
4d31f30148cea6 Daniel Borkmann 2018-11-01 7311 }
3c4807322660d4 Hao Luo 2021-12-16 7312 } else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
46f8bc92758c62 Martin KaFai Lau 2019-02-09 7313 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae136039f Hao Luo 2021-12-16 7314 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
3c4807322660d4 Hao Luo 2021-12-16 7315 } else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
85a51f8c28b981 Lorenz Bauer 2019-03-22 7316 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae136039f Hao Luo 2021-12-16 7317 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
3c4807322660d4 Hao Luo 2021-12-16 7318 } else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
655a51e536c09d Martin KaFai Lau 2019-02-09 7319 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae136039f Hao Luo 2021-12-16 7320 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
3c4807322660d4 Hao Luo 2021-12-16 7321 } else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
457f44363a8894 Andrii Nakryiko 2020-05-29 7322 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae136039f Hao Luo 2021-12-16 7323 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
457f44363a8894 Andrii Nakryiko 2020-05-29 7324 regs[BPF_REG_0].mem_size = meta.mem_size;
3c4807322660d4 Hao Luo 2021-12-16 7325 } else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
eaa6bcb71ef6ed Hao Luo 2020-09-29 7326 const struct btf_type *t;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7327
eaa6bcb71ef6ed Hao Luo 2020-09-29 7328 mark_reg_known_zero(env, regs, BPF_REG_0);
22dc4a0f5ed11b Andrii Nakryiko 2020-12-03 7329 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
eaa6bcb71ef6ed Hao Luo 2020-09-29 7330 if (!btf_type_is_struct(t)) {
eaa6bcb71ef6ed Hao Luo 2020-09-29 7331 u32 tsize;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7332 const struct btf_type *ret;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7333 const char *tname;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7334
eaa6bcb71ef6ed Hao Luo 2020-09-29 7335 /* resolve the type size of ksym. */
22dc4a0f5ed11b Andrii Nakryiko 2020-12-03 7336 ret = btf_resolve_size(meta.ret_btf, t, &tsize);
eaa6bcb71ef6ed Hao Luo 2020-09-29 7337 if (IS_ERR(ret)) {
22dc4a0f5ed11b Andrii Nakryiko 2020-12-03 7338 tname = btf_name_by_offset(meta.ret_btf, t->name_off);
eaa6bcb71ef6ed Hao Luo 2020-09-29 7339 verbose(env, "unable to resolve the size of type '%s': %ld\n",
eaa6bcb71ef6ed Hao Luo 2020-09-29 7340 tname, PTR_ERR(ret));
eaa6bcb71ef6ed Hao Luo 2020-09-29 7341 return -EINVAL;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7342 }
c25b2ae136039f Hao Luo 2021-12-16 7343 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7344 regs[BPF_REG_0].mem_size = tsize;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7345 } else {
34d3a78c681e8e Hao Luo 2021-12-16 7346 /* MEM_RDONLY may be carried from ret_flag, but it
34d3a78c681e8e Hao Luo 2021-12-16 7347 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
34d3a78c681e8e Hao Luo 2021-12-16 7348 * it will confuse the check of PTR_TO_BTF_ID in
34d3a78c681e8e Hao Luo 2021-12-16 7349 * check_mem_access().
34d3a78c681e8e Hao Luo 2021-12-16 7350 */
34d3a78c681e8e Hao Luo 2021-12-16 7351 ret_flag &= ~MEM_RDONLY;
34d3a78c681e8e Hao Luo 2021-12-16 7352
c25b2ae136039f Hao Luo 2021-12-16 7353 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
22dc4a0f5ed11b Andrii Nakryiko 2020-12-03 7354 regs[BPF_REG_0].btf = meta.ret_btf;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7355 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
eaa6bcb71ef6ed Hao Luo 2020-09-29 7356 }
3c4807322660d4 Hao Luo 2021-12-16 7357 } else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
c0a5a21c25f37c Kumar Kartikeya Dwivedi 2022-04-25 7358 struct btf *ret_btf;
af7ec13833619e Yonghong Song 2020-06-23 7359 int ret_btf_id;
af7ec13833619e Yonghong Song 2020-06-23 7360
af7ec13833619e Yonghong Song 2020-06-23 7361 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae136039f Hao Luo 2021-12-16 7362 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
c0a5a21c25f37c Kumar Kartikeya Dwivedi 2022-04-25 7363 if (func_id == BPF_FUNC_kptr_xchg) {
c0a5a21c25f37c Kumar Kartikeya Dwivedi 2022-04-25 7364 ret_btf = meta.kptr_off_desc->kptr.btf;
c0a5a21c25f37c Kumar Kartikeya Dwivedi 2022-04-25 7365 ret_btf_id = meta.kptr_off_desc->kptr.btf_id;
c0a5a21c25f37c Kumar Kartikeya Dwivedi 2022-04-25 7366 } else {
c0a5a21c25f37c Kumar Kartikeya Dwivedi 2022-04-25 7367 ret_btf = btf_vmlinux;
af7ec13833619e Yonghong Song 2020-06-23 7368 ret_btf_id = *fn->ret_btf_id;
c0a5a21c25f37c Kumar Kartikeya Dwivedi 2022-04-25 7369 }
af7ec13833619e Yonghong Song 2020-06-23 7370 if (ret_btf_id == 0) {
3c4807322660d4 Hao Luo 2021-12-16 7371 verbose(env, "invalid return type %u of func %s#%d\n",
3c4807322660d4 Hao Luo 2021-12-16 7372 base_type(ret_type), func_id_name(func_id),
3c4807322660d4 Hao Luo 2021-12-16 7373 func_id);
af7ec13833619e Yonghong Song 2020-06-23 7374 return -EINVAL;
af7ec13833619e Yonghong Song 2020-06-23 7375 }
c0a5a21c25f37c Kumar Kartikeya Dwivedi 2022-04-25 7376 regs[BPF_REG_0].btf = ret_btf;
af7ec13833619e Yonghong Song 2020-06-23 7377 regs[BPF_REG_0].btf_id = ret_btf_id;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7378 } else {
3c4807322660d4 Hao Luo 2021-12-16 7379 verbose(env, "unknown return type %u of func %s#%d\n",
3c4807322660d4 Hao Luo 2021-12-16 7380 base_type(ret_type), func_id_name(func_id), func_id);
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7381 return -EINVAL;
17a5267067f3c3 Alexei Starovoitov 2014-09-26 7382 }
04fd61ab36ec06 Alexei Starovoitov 2015-05-19 7383
c25b2ae136039f Hao Luo 2021-12-16 7384 if (type_may_be_null(regs[BPF_REG_0].type))
93c230e3f5bd6e Martin KaFai Lau 2020-10-19 7385 regs[BPF_REG_0].id = ++env->id_gen;
93c230e3f5bd6e Martin KaFai Lau 2020-10-19 7386
0f3adc288df8ba Lorenz Bauer 2019-03-22 7387 if (is_ptr_cast_function(func_id)) {
1b986589680a2a Martin KaFai Lau 2019-03-12 7388 /* For release_reference() */
1b986589680a2a Martin KaFai Lau 2019-03-12 7389 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
64d85290d79c06 Jakub Sitnicki 2020-04-29 7390 } else if (is_acquire_function(func_id, meta.map_ptr)) {
0f3adc288df8ba Lorenz Bauer 2019-03-22 7391 int id = acquire_reference_state(env, insn_idx);
0f3adc288df8ba Lorenz Bauer 2019-03-22 7392
0f3adc288df8ba Lorenz Bauer 2019-03-22 7393 if (id < 0)
0f3adc288df8ba Lorenz Bauer 2019-03-22 7394 return id;
0f3adc288df8ba Lorenz Bauer 2019-03-22 7395 /* For mark_ptr_or_null_reg() */
0f3adc288df8ba Lorenz Bauer 2019-03-22 7396 regs[BPF_REG_0].id = id;
0f3adc288df8ba Lorenz Bauer 2019-03-22 7397 /* For release_reference() */
0f3adc288df8ba Lorenz Bauer 2019-03-22 7398 regs[BPF_REG_0].ref_obj_id = id;
34d4ef5775f776 Joanne Koong 2022-05-23 7399 } else if (func_id == BPF_FUNC_dynptr_data) {
34d4ef5775f776 Joanne Koong 2022-05-23 7400 int dynptr_id = 0, i;
34d4ef5775f776 Joanne Koong 2022-05-23 7401
34d4ef5775f776 Joanne Koong 2022-05-23 7402 /* Find the id of the dynptr we're acquiring a reference to */
34d4ef5775f776 Joanne Koong 2022-05-23 7403 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
34d4ef5775f776 Joanne Koong 2022-05-23 7404 if (arg_type_is_dynptr(fn->arg_type[i])) {
34d4ef5775f776 Joanne Koong 2022-05-23 7405 if (dynptr_id) {
34d4ef5775f776 Joanne Koong 2022-05-23 7406 verbose(env, "verifier internal error: multiple dynptr args in func\n");
34d4ef5775f776 Joanne Koong 2022-05-23 7407 return -EFAULT;
34d4ef5775f776 Joanne Koong 2022-05-23 7408 }
34d4ef5775f776 Joanne Koong 2022-05-23 7409 dynptr_id = stack_slot_get_id(env, &regs[BPF_REG_1 + i]);
34d4ef5775f776 Joanne Koong 2022-05-23 7410 }
34d4ef5775f776 Joanne Koong 2022-05-23 7411 }
34d4ef5775f776 Joanne Koong 2022-05-23 7412 /* For release_reference() */
34d4ef5775f776 Joanne Koong 2022-05-23 7413 regs[BPF_REG_0].ref_obj_id = dynptr_id;
0f3adc288df8ba Lorenz Bauer 2019-03-22 7414 }
1b986589680a2a Martin KaFai Lau 2019-03-12 7415
849fa50662fbc8 Yonghong Song 2018-04-28 7416 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
849fa50662fbc8 Yonghong Song 2018-04-28 7417
61bd5218eef349 Jakub Kicinski 2017-10-09 7418 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79840030 Kaixu Xia 2015-08-06 7419 if (err)
35578d79840030 Kaixu Xia 2015-08-06 7420 return err;
04fd61ab36ec06 Alexei Starovoitov 2015-05-19 7421
fa28dcb82a38f8 Song Liu 2020-06-29 7422 if ((func_id == BPF_FUNC_get_stack ||
fa28dcb82a38f8 Song Liu 2020-06-29 7423 func_id == BPF_FUNC_get_task_stack) &&
fa28dcb82a38f8 Song Liu 2020-06-29 7424 !env->prog->has_callchain_buf) {
c195651e565ae7 Yonghong Song 2018-04-28 7425 const char *err_str;
c195651e565ae7 Yonghong Song 2018-04-28 7426
--
0-DAY CI Kernel Test Service
https://01.org/lkp
^ permalink raw reply [flat|nested] 18+ messages in thread
end of thread, other threads:[~2022-08-01 13:59 UTC | newest]
Thread overview: 18+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-07-11 8:32 [RFC PATCH bpf-next 0/4] bpf_panic() helper Artem Savkov
2022-07-11 8:32 ` [RFC PATCH bpf-next 1/4] bpf: add a sysctl to enable destructive bpf helpers Artem Savkov
2022-07-11 8:32 ` [RFC PATCH bpf-next 2/4] bpf: add BPF_F_DESTRUCTIVE flag for BPF_PROG_LOAD Artem Savkov
2022-07-11 10:56 ` Jiri Olsa
2022-07-11 11:48 ` Artem Savkov
2022-07-11 8:32 ` [RFC PATCH bpf-next 3/4] bpf: add bpf_panic() helper Artem Savkov
2022-07-11 10:42 ` Jiri Olsa
2022-07-12 17:53 ` Song Liu
2022-07-12 18:08 ` Alexei Starovoitov
2022-07-13 13:31 ` Artem Savkov
2022-07-13 22:20 ` Alexei Starovoitov
2022-07-15 12:52 ` Artem Savkov
2022-07-18 21:01 ` Alexei Starovoitov
2022-07-14 17:03 ` kernel test robot
2022-07-11 8:32 ` [RFC PATCH bpf-next 4/4] selftests/bpf: bpf_panic selftest Artem Savkov
2022-07-11 10:51 ` [RFC PATCH bpf-next 0/4] bpf_panic() helper Jiri Olsa
2022-08-01 13:58 ` Daniel Vacek
2022-07-14 20:07 [RFC PATCH bpf-next 3/4] bpf: add " kernel test robot
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.