* [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access
@ 2018-10-18 16:06 Song Liu
2018-10-18 16:06 ` [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB Song Liu
` (2 more replies)
0 siblings, 3 replies; 8+ messages in thread
From: Song Liu @ 2018-10-18 16:06 UTC (permalink / raw)
To: netdev; +Cc: ast, daniel, kernel-team, Song Liu
Changes v3 -> v4:
1. Fixed crash issue reported by Alexei.
Changes v2 -> v3:
1. Added helper function bpf_compute_and_save_data_pointers() and
bpf_restore_data_pointers().
Changes v1 -> v2:
1. Updated the list of read-only fields, and read-write fields.
2. Added dummy sk to bpf_prog_test_run_skb().
This set enables BPF program of type BPF_PROG_TYPE_CGROUP_SKB to access
some __sk_buff data directly.
Song Liu (2):
bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB
bpf: add tests for direct packet access from CGROUP_SKB
include/linux/filter.h | 24 +++
kernel/bpf/cgroup.c | 6 +
net/bpf/test_run.c | 7 +
net/core/filter.c | 36 ++++-
tools/testing/selftests/bpf/test_verifier.c | 170 ++++++++++++++++++++
5 files changed, 242 insertions(+), 1 deletion(-)
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB
2018-10-18 16:06 [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access Song Liu
@ 2018-10-18 16:06 ` Song Liu
2018-10-19 0:14 ` Daniel Borkmann
2018-10-18 16:06 ` [PATCH v4 bpf-next 2/2] bpf: add tests for direct packet access from CGROUP_SKB Song Liu
2018-10-18 23:34 ` [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access Alexei Starovoitov
2 siblings, 1 reply; 8+ messages in thread
From: Song Liu @ 2018-10-18 16:06 UTC (permalink / raw)
To: netdev; +Cc: ast, daniel, kernel-team, Song Liu
BPF programs of BPF_PROG_TYPE_CGROUP_SKB need to access headers in the
skb. This patch enables direct access of skb for these programs.
Two helper functions bpf_compute_and_save_data_pointers() and
bpf_restore_data_pointers() are introduced. They are used in
__cgroup_bpf_run_filter_skb(), to compute proper data_end for the
BPF program, and restore original data afterwards.
Signed-off-by: Song Liu <songliubraving@fb.com>
---
include/linux/filter.h | 24 ++++++++++++++++++++++++
kernel/bpf/cgroup.c | 6 ++++++
net/core/filter.c | 36 +++++++++++++++++++++++++++++++++++-
3 files changed, 65 insertions(+), 1 deletion(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 5771874bc01e..96b3ee7f14c9 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -548,6 +548,30 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
cb->data_end = skb->data + skb_headlen(skb);
}
+/* Similar to bpf_compute_data_pointers(), except that it saves the
+ * original cb->data_meta and cb->data_end for later restore.
+ */
+static inline void bpf_compute_and_save_data_pointers(
+ struct sk_buff *skb, void *saved_pointers[2])
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ saved_pointers[0] = cb->data_meta;
+ saved_pointers[1] = cb->data_end;
+ cb->data_meta = skb->data - skb_metadata_len(skb);
+ cb->data_end = skb->data + skb_headlen(skb);
+}
+
+/* Restore pointers saved by bpf_compute_and_save_data_pointers(). */
+static inline void bpf_restore_data_pointers(
+ struct sk_buff *skb, void *saved_pointers[2])
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+
+ cb->data_meta = saved_pointers[0];
+ cb->data_end = saved_pointers[1];
+}
+
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 00f6ed2e4f9a..5f5180104ddc 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -554,6 +554,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
unsigned int offset = skb->data - skb_network_header(skb);
struct sock *save_sk;
struct cgroup *cgrp;
+ void *saved_pointers[2];
int ret;
if (!sk || !sk_fullsock(sk))
@@ -566,8 +567,13 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
save_sk = skb->sk;
skb->sk = sk;
__skb_push(skb, offset);
+
+ /* compute pointers for the bpf prog */
+ bpf_compute_and_save_data_pointers(skb, saved_pointers);
+
ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
bpf_prog_run_save_cb);
+ bpf_restore_data_pointers(skb, saved_pointers);
__skb_pull(skb, offset);
skb->sk = save_sk;
return ret == 1 ? 0 : -EPERM;
diff --git a/net/core/filter.c b/net/core/filter.c
index 1a3ac6c46873..e3ca30bd6840 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5346,6 +5346,40 @@ static bool sk_filter_is_valid_access(int off, int size,
return bpf_skb_is_valid_access(off, size, type, prog, info);
}
+static bool cg_skb_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ switch (off) {
+ case bpf_ctx_range(struct __sk_buff, tc_classid):
+ case bpf_ctx_range(struct __sk_buff, data_meta):
+ case bpf_ctx_range(struct __sk_buff, flow_keys):
+ return false;
+ }
+ if (type == BPF_WRITE) {
+ switch (off) {
+ case bpf_ctx_range(struct __sk_buff, mark):
+ case bpf_ctx_range(struct __sk_buff, priority):
+ case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+ break;
+ default:
+ return false;
+ }
+ }
+
+ switch (off) {
+ case bpf_ctx_range(struct __sk_buff, data):
+ info->reg_type = PTR_TO_PACKET;
+ break;
+ case bpf_ctx_range(struct __sk_buff, data_end):
+ info->reg_type = PTR_TO_PACKET_END;
+ break;
+ }
+
+ return bpf_skb_is_valid_access(off, size, type, prog, info);
+}
+
static bool lwt_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
@@ -7038,7 +7072,7 @@ const struct bpf_prog_ops xdp_prog_ops = {
const struct bpf_verifier_ops cg_skb_verifier_ops = {
.get_func_proto = cg_skb_func_proto,
- .is_valid_access = sk_filter_is_valid_access,
+ .is_valid_access = cg_skb_is_valid_access,
.convert_ctx_access = bpf_convert_ctx_access,
};
--
2.17.1
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH v4 bpf-next 2/2] bpf: add tests for direct packet access from CGROUP_SKB
2018-10-18 16:06 [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access Song Liu
2018-10-18 16:06 ` [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB Song Liu
@ 2018-10-18 16:06 ` Song Liu
2018-10-18 23:34 ` [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access Alexei Starovoitov
2 siblings, 0 replies; 8+ messages in thread
From: Song Liu @ 2018-10-18 16:06 UTC (permalink / raw)
To: netdev; +Cc: ast, daniel, kernel-team, Song Liu
Tests are added to make sure CGROUP_SKB cannot access:
tc_classid, data_meta, flow_keys
and can read and write:
mark, priority, and cb[0-4]
and can read other fields.
To make selftest with skb->sk work, a dummy sk is added in
bpf_prog_test_run_skb().
Signed-off-by: Song Liu <songliubraving@fb.com>
---
net/bpf/test_run.c | 7 +
tools/testing/selftests/bpf/test_verifier.c | 170 ++++++++++++++++++++
2 files changed, 177 insertions(+)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 0c423b8cd75c..8dccac305268 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -10,6 +10,8 @@
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
+#include <net/sock.h>
+#include <net/tcp.h>
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
@@ -115,6 +117,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
u32 retval, duration;
int hh_len = ETH_HLEN;
struct sk_buff *skb;
+ struct sock sk = {0};
void *data;
int ret;
@@ -137,11 +140,15 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
break;
}
+ sock_net_set(&sk, &init_net);
+ sock_init_data(NULL, &sk);
+
skb = build_skb(data, 0);
if (!skb) {
kfree(data);
return -ENOMEM;
}
+ skb->sk = &sk;
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
__skb_put(skb, size);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index cf4cd32b6772..5bfba7e8afd7 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -4862,6 +4862,176 @@ static struct bpf_test tests[] = {
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
+ {
+ "direct packet read test#1 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, len)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, pkt_type)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, queue_mapping)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, protocol)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_present)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "direct packet read test#2 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_tci)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, vlan_proto)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, priority)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, priority)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, ingress_ifindex)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_index)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, hash)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "direct packet read test#3 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "direct packet read test#4 for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, family)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip4)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, local_ip6[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, remote_port)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, local_port)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid access of tc_classid for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tc_classid)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid access of data_meta for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, data_meta)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid access of flow_keys for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, flow_keys)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "invalid write access to napi_id for CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
+ offsetof(struct __sk_buff, napi_id)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid bpf_context access",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
{
"valid cgroup storage access",
.insns = {
--
2.17.1
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access
2018-10-18 16:06 [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access Song Liu
2018-10-18 16:06 ` [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB Song Liu
2018-10-18 16:06 ` [PATCH v4 bpf-next 2/2] bpf: add tests for direct packet access from CGROUP_SKB Song Liu
@ 2018-10-18 23:34 ` Alexei Starovoitov
2018-10-19 1:46 ` Alexei Starovoitov
2 siblings, 1 reply; 8+ messages in thread
From: Alexei Starovoitov @ 2018-10-18 23:34 UTC (permalink / raw)
To: Song Liu; +Cc: netdev, ast, daniel, kernel-team
On Thu, Oct 18, 2018 at 09:06:47AM -0700, Song Liu wrote:
> Changes v3 -> v4:
> 1. Fixed crash issue reported by Alexei.
>
> Changes v2 -> v3:
> 1. Added helper function bpf_compute_and_save_data_pointers() and
> bpf_restore_data_pointers().
>
> Changes v1 -> v2:
> 1. Updated the list of read-only fields, and read-write fields.
> 2. Added dummy sk to bpf_prog_test_run_skb().
>
> This set enables BPF program of type BPF_PROG_TYPE_CGROUP_SKB to access
> some __skb_buff data directly.
Applied, Thanks
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB
2018-10-18 16:06 ` [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB Song Liu
@ 2018-10-19 0:14 ` Daniel Borkmann
2018-10-19 1:33 ` Alexei Starovoitov
0 siblings, 1 reply; 8+ messages in thread
From: Daniel Borkmann @ 2018-10-19 0:14 UTC (permalink / raw)
To: Song Liu, netdev; +Cc: ast, kernel-team
On 10/18/2018 06:06 PM, Song Liu wrote:
> BPF programs of BPF_PROG_TYPE_CGROUP_SKB need to access headers in the
> skb. This patch enables direct access of skb for these programs.
>
> Two helper functions bpf_compute_and_save_data_pointers() and
> bpf_restore_data_pointers() are introduced. There are used in
> __cgroup_bpf_run_filter_skb(), to compute proper data_end for the
> BPF program, and restore original data afterwards.
>
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---
> include/linux/filter.h | 24 ++++++++++++++++++++++++
> kernel/bpf/cgroup.c | 6 ++++++
> net/core/filter.c | 36 +++++++++++++++++++++++++++++++++++-
> 3 files changed, 65 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index 5771874bc01e..96b3ee7f14c9 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -548,6 +548,30 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
> cb->data_end = skb->data + skb_headlen(skb);
> }
>
> +/* Similar to bpf_compute_data_pointers(), except that save orginal
> + * data in cb->data and cb->meta_data for restore.
> + */
> +static inline void bpf_compute_and_save_data_pointers(
> + struct sk_buff *skb, void *saved_pointers[2])
> +{
> + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
> +
> + saved_pointers[0] = cb->data_meta;
> + saved_pointers[1] = cb->data_end;
> + cb->data_meta = skb->data - skb_metadata_len(skb);
> + cb->data_end = skb->data + skb_headlen(skb);
Hmm, can you elaborate why populating data_meta here ...
> +}
> +
> +/* Restore data saved by bpf_compute_data_pointers(). */
> +static inline void bpf_restore_data_pointers(
> + struct sk_buff *skb, void *saved_pointers[2])
> +{
> + struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
> +
> + cb->data_meta = saved_pointers[0];
> + cb->data_end = saved_pointers[1];;
> +}
> +
> static inline u8 *bpf_skb_cb(struct sk_buff *skb)
> {
> /* eBPF programs may read/write skb->cb[] area to transfer meta
> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index 00f6ed2e4f9a..5f5180104ddc 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -554,6 +554,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
> unsigned int offset = skb->data - skb_network_header(skb);
> struct sock *save_sk;
> struct cgroup *cgrp;
> + void *saved_pointers[2];
> int ret;
>
> if (!sk || !sk_fullsock(sk))
> @@ -566,8 +567,13 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
> save_sk = skb->sk;
> skb->sk = sk;
> __skb_push(skb, offset);
> +
> + /* compute pointers for the bpf prog */
> + bpf_compute_and_save_data_pointers(skb, saved_pointers);
> +
> ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
> bpf_prog_run_save_cb);
> + bpf_restore_data_pointers(skb, saved_pointers);
> __skb_pull(skb, offset);
> skb->sk = save_sk;
> return ret == 1 ? 0 : -EPERM;
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 1a3ac6c46873..e3ca30bd6840 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -5346,6 +5346,40 @@ static bool sk_filter_is_valid_access(int off, int size,
> return bpf_skb_is_valid_access(off, size, type, prog, info);
> }
>
> +static bool cg_skb_is_valid_access(int off, int size,
> + enum bpf_access_type type,
> + const struct bpf_prog *prog,
> + struct bpf_insn_access_aux *info)
> +{
> + switch (off) {
> + case bpf_ctx_range(struct __sk_buff, tc_classid):
> + case bpf_ctx_range(struct __sk_buff, data_meta):
> + case bpf_ctx_range(struct __sk_buff, flow_keys):
> + return false;
... if it's disallowed anyway (disallowing it is the right thing to do,
but no need to save/restore then..)?
> + }
> + if (type == BPF_WRITE) {
> + switch (off) {
> + case bpf_ctx_range(struct __sk_buff, mark):
> + case bpf_ctx_range(struct __sk_buff, priority):
> + case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
> + break;
> + default:
> + return false;
> + }
> + }
> +
> + switch (off) {
> + case bpf_ctx_range(struct __sk_buff, data):
> + info->reg_type = PTR_TO_PACKET;
> + break;
> + case bpf_ctx_range(struct __sk_buff, data_end):
> + info->reg_type = PTR_TO_PACKET_END;
> + break;
> + }
> +
> + return bpf_skb_is_valid_access(off, size, type, prog, info);
> +}
> +
> static bool lwt_is_valid_access(int off, int size,
> enum bpf_access_type type,
> const struct bpf_prog *prog,
> @@ -7038,7 +7072,7 @@ const struct bpf_prog_ops xdp_prog_ops = {
>
> const struct bpf_verifier_ops cg_skb_verifier_ops = {
> .get_func_proto = cg_skb_func_proto,
> - .is_valid_access = sk_filter_is_valid_access,
> + .is_valid_access = cg_skb_is_valid_access,
> .convert_ctx_access = bpf_convert_ctx_access,
> };
>
>
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB
2018-10-19 0:14 ` Daniel Borkmann
@ 2018-10-19 1:33 ` Alexei Starovoitov
2018-10-19 2:03 ` Alexei Starovoitov
0 siblings, 1 reply; 8+ messages in thread
From: Alexei Starovoitov @ 2018-10-19 1:33 UTC (permalink / raw)
To: Daniel Borkmann, Song Liu, netdev; +Cc: ast, Kernel Team
On 10/18/18 5:14 PM, Daniel Borkmann wrote:
>> + case bpf_ctx_range(struct __sk_buff, data_meta):
>> + case bpf_ctx_range(struct __sk_buff, flow_keys):
>> + return false;
> ... if it's disallowed anyway (disallowing it is the right thing to do,
> but no need to save/restore then..)?
>
that's a good point.
why shouldn't we allow cg_skb to access data_meta?
xdp can set it and cgroup_skb_ingress will consume it here.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access
2018-10-18 23:34 ` [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access Alexei Starovoitov
@ 2018-10-19 1:46 ` Alexei Starovoitov
0 siblings, 0 replies; 8+ messages in thread
From: Alexei Starovoitov @ 2018-10-19 1:46 UTC (permalink / raw)
To: Alexei Starovoitov, Song Liu; +Cc: netdev, ast, daniel, Kernel Team
On 10/18/18 4:34 PM, Alexei Starovoitov wrote:
> On Thu, Oct 18, 2018 at 09:06:47AM -0700, Song Liu wrote:
>> Changes v3 -> v4:
>> 1. Fixed crash issue reported by Alexei.
>>
>> Changes v2 -> v3:
>> 1. Added helper function bpf_compute_and_save_data_pointers() and
>> bpf_restore_data_pointers().
>>
>> Changes v1 -> v2:
>> 1. Updated the list of read-only fields, and read-write fields.
>> 2. Added dummy sk to bpf_prog_test_run_skb().
>>
>> This set enables BPF program of type BPF_PROG_TYPE_CGROUP_SKB to access
>> some __skb_buff data directly.
>
> Applied, Thanks
Daniel brought up good point re: data_meta. Let's address it one way
or the other and respin the series instead of adding follow up patches.
So I tossed this series out of bpf-next.
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB
2018-10-19 1:33 ` Alexei Starovoitov
@ 2018-10-19 2:03 ` Alexei Starovoitov
0 siblings, 0 replies; 8+ messages in thread
From: Alexei Starovoitov @ 2018-10-19 2:03 UTC (permalink / raw)
To: Daniel Borkmann, Song Liu, netdev; +Cc: ast, Kernel Team
On 10/18/18 6:33 PM, Alexei Starovoitov wrote:
> On 10/18/18 5:14 PM, Daniel Borkmann wrote:
>>> + case bpf_ctx_range(struct __sk_buff, data_meta):
>>> + case bpf_ctx_range(struct __sk_buff, flow_keys):
>>> + return false;
>> ... if it's disallowed anyway (disallowing it is the right thing to do,
>> but no need to save/restore then..)?
>>
>
> that's a good point.
> why shouldn't we allow cg_skb to access data_meta?
> xdp can set it and cgroup_skb_ingress will consume it here.
I'll take it back.
When xdp doesn't set meta_data it will be zero and
bpf_compute_data_pointers() will point data_meta to skb->data.
On ingress that's eth header, but for tx it will point
to reserved space for future eth header.
So we cannot do that.
Let's keep it disabled and adjust
bpf_compute_and_save_data_pointers() to save only 'data' pointer.
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2018-10-19 10:07 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-10-18 16:06 [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access Song Liu
2018-10-18 16:06 ` [PATCH v4 bpf-next 1/2] bpf: add cg_skb_is_valid_access for BPF_PROG_TYPE_CGROUP_SKB Song Liu
2018-10-19 0:14 ` Daniel Borkmann
2018-10-19 1:33 ` Alexei Starovoitov
2018-10-19 2:03 ` Alexei Starovoitov
2018-10-18 16:06 ` [PATCH v4 bpf-next 2/2] bpf: add tests for direct packet access from CGROUP_SKB Song Liu
2018-10-18 23:34 ` [PATCH v4 bpf-next 0/2] bpf: add cg_skb_is_valid_access Alexei Starovoitov
2018-10-19 1:46 ` Alexei Starovoitov
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.