* [PATCH bpf-next] bpf: use bpf_prog_run_array_cg_flags everywhere
From: Stanislav Fomichev @ 2022-04-19 22:22 UTC
To: netdev, bpf; +Cc: ast, daniel, andrii, Stanislav Fomichev
Rename bpf_prog_run_array_cg_flags to bpf_prog_run_array_cg and
use it everywhere. check_return_code already enforces sane
return ranges for all cgroup types (only the egress and bind hooks
have non-canonical return ranges; the rest use [0, 1]).
No functional changes.
Suggested-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
include/linux/bpf-cgroup.h | 8 ++---
kernel/bpf/cgroup.c | 70 ++++++++++++--------------------------
2 files changed, 24 insertions(+), 54 deletions(-)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 88a51b242adc..669d96d074ad 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -225,24 +225,20 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
- u32 __unused_flags; \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- NULL, \
- &__unused_flags); \
+ NULL, NULL); \
__ret; \
})
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
- u32 __unused_flags; \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- t_ctx, \
- &__unused_flags); \
+ t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 0cb6211fcb58..f61eca32c747 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -25,50 +25,18 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key);
/* __always_inline is necessary to prevent indirect call through run_prog
* function pointer.
*/
-static __always_inline int
-bpf_prog_run_array_cg_flags(const struct cgroup_bpf *cgrp,
- enum cgroup_bpf_attach_type atype,
- const void *ctx, bpf_prog_run_fn run_prog,
- int retval, u32 *ret_flags)
-{
- const struct bpf_prog_array_item *item;
- const struct bpf_prog *prog;
- const struct bpf_prog_array *array;
- struct bpf_run_ctx *old_run_ctx;
- struct bpf_cg_run_ctx run_ctx;
- u32 func_ret;
-
- run_ctx.retval = retval;
- migrate_disable();
- rcu_read_lock();
- array = rcu_dereference(cgrp->effective[atype]);
- item = &array->items[0];
- old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
- while ((prog = READ_ONCE(item->prog))) {
- run_ctx.prog_item = item;
- func_ret = run_prog(prog, ctx);
- if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
- run_ctx.retval = -EPERM;
- *(ret_flags) |= (func_ret >> 1);
- item++;
- }
- bpf_reset_run_ctx(old_run_ctx);
- rcu_read_unlock();
- migrate_enable();
- return run_ctx.retval;
-}
-
static __always_inline int
bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
enum cgroup_bpf_attach_type atype,
const void *ctx, bpf_prog_run_fn run_prog,
- int retval)
+ int retval, u32 *ret_flags)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_cg_run_ctx run_ctx;
+ u32 func_ret;
run_ctx.retval = retval;
migrate_disable();
@@ -78,8 +46,11 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
while ((prog = READ_ONCE(item->prog))) {
run_ctx.prog_item = item;
- if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
+ func_ret = run_prog(prog, ctx);
+ if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
run_ctx.retval = -EPERM;
+ if (ret_flags)
+ *(ret_flags) |= (func_ret >> 1);
item++;
}
bpf_reset_run_ctx(old_run_ctx);
@@ -1144,9 +1115,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
u32 flags = 0;
bool cn;
- ret = bpf_prog_run_array_cg_flags(
- &cgrp->bpf, atype,
- skb, __bpf_prog_run_save_cb, 0, &flags);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, skb,
+ __bpf_prog_run_save_cb, 0, &flags);
/* Return values of CGROUP EGRESS BPF programs are:
* 0: drop packet
@@ -1172,7 +1142,8 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
ret = (cn ? NET_XMIT_DROP : ret);
} else {
ret = bpf_prog_run_array_cg(&cgrp->bpf, atype,
- skb, __bpf_prog_run_save_cb, 0);
+ skb, __bpf_prog_run_save_cb, 0,
+ NULL);
if (ret && !IS_ERR_VALUE((long)ret))
ret = -EFAULT;
}
@@ -1202,7 +1173,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
{
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0);
+ return bpf_prog_run_array_cg(&cgrp->bpf, atype, sk, bpf_prog_run, 0,
+ NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
@@ -1247,8 +1219,8 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
}
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
- return bpf_prog_run_array_cg_flags(&cgrp->bpf, atype,
- &ctx, bpf_prog_run, 0, flags);
+ return bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run,
+ 0, flags);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
@@ -1275,7 +1247,7 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
return bpf_prog_run_array_cg(&cgrp->bpf, atype, sock_ops, bpf_prog_run,
- 0);
+ 0, NULL);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
@@ -1292,7 +1264,8 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
rcu_read_lock();
cgrp = task_dfl_cgroup(current);
- ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
+ NULL);
rcu_read_unlock();
return ret;
@@ -1457,7 +1430,8 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
rcu_read_lock();
cgrp = task_dfl_cgroup(current);
- ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0);
+ ret = bpf_prog_run_array_cg(&cgrp->bpf, atype, &ctx, bpf_prog_run, 0,
+ NULL);
rcu_read_unlock();
kfree(ctx.cur_val);
@@ -1550,7 +1524,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
lock_sock(sk);
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_SETSOCKOPT,
- &ctx, bpf_prog_run, 0);
+ &ctx, bpf_prog_run, 0, NULL);
release_sock(sk);
if (ret)
@@ -1650,7 +1624,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
lock_sock(sk);
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
- &ctx, bpf_prog_run, retval);
+ &ctx, bpf_prog_run, retval, NULL);
release_sock(sk);
if (ret < 0)
@@ -1699,7 +1673,7 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
*/
ret = bpf_prog_run_array_cg(&cgrp->bpf, CGROUP_GETSOCKOPT,
- &ctx, bpf_prog_run, retval);
+ &ctx, bpf_prog_run, retval, NULL);
if (ret < 0)
return ret;
--
2.36.0.rc0.470.gd361397f0d-goog
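For context on what the merged helper decodes: cgroup BPF programs
attached to the hooks that support flags (egress and bind) return their
verdict in bit 0 of the return value and auxiliary flags, such as the
egress congestion-notification bit (BPF_RET_SET_CN), in the upper bits.
A minimal user-space sketch of that decoding, using simplified stand-in
names rather than the kernel's actual types and constants:

#include <stdio.h>

/* Sketch of the cgroup BPF return-value convention (simplified, not
 * kernel code): bit 0 is the allow/deny verdict, the upper bits are
 * flags that only some hooks consume.
 */
static int run_one_prog(unsigned int func_ret, unsigned int *ret_flags,
                        int retval)
{
        if (ret_flags) {
                *ret_flags |= func_ret >> 1;    /* upper bits: flags */
                func_ret &= 1;                  /* bit 0: verdict */
        }
        if (!func_ret)
                retval = -1;                    /* stand-in for -EPERM */
        return retval;
}

int main(void)
{
        unsigned int flags = 0;

        /* Program returned 0x3: allow, with flag bit 0 set. */
        printf("retval=%d flags=%#x\n", run_one_prog(0x3, &flags, 0), flags);
        /* Program returned 0x0, no flags consumer: deny. */
        printf("retval=%d\n", run_one_prog(0x0, NULL, 0));
        return 0;
}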
* Re: [PATCH bpf-next] bpf: use bpf_prog_run_array_cg_flags everywhere
From: Andrii Nakryiko @ 2022-04-20 22:04 UTC
To: Stanislav Fomichev
Cc: Networking, bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On Tue, Apr 19, 2022 at 3:23 PM Stanislav Fomichev <sdf@google.com> wrote:
> [...]
> @@ -78,8 +46,11 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
> old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> while ((prog = READ_ONCE(item->prog))) {
> run_ctx.prog_item = item;
> - if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
> + func_ret = run_prog(prog, ctx);
> + if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
to be completely true to previous behavior, shouldn't there be
if (ret_flags)
func_ret &= 1;
if (!func_ret && !IS_ERR_VALUE(...))
here?
This might have been discussed previously and I missed it. If that's
so, please ignore.
> run_ctx.retval = -EPERM;
> + if (ret_flags)
> + *(ret_flags) |= (func_ret >> 1);
> item++;
> }
> bpf_reset_run_ctx(old_run_ctx);
[..]
* Re: [PATCH bpf-next] bpf: use bpf_prog_run_array_cg_flags everywhere
From: Stanislav Fomichev @ 2022-04-20 22:30 UTC
To: Andrii Nakryiko
Cc: Networking, bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko
On Wed, Apr 20, 2022 at 3:04 PM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Tue, Apr 19, 2022 at 3:23 PM Stanislav Fomichev <sdf@google.com> wrote:
> > [...]
> > @@ -78,8 +46,11 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
> > old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> > while ((prog = READ_ONCE(item->prog))) {
> > run_ctx.prog_item = item;
> > - if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
> > + func_ret = run_prog(prog, ctx);
> > + if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
>
> to be completely true to previous behavior, shouldn't there be
>
> if (ret_flags)
> func_ret &= 1;
> if (!func_ret && !IS_ERR_VALUE(...))
>
> here?
>
> This might have been discussed previously and I missed it. If that's
> so, please ignore.
We are converting the cases where run_prog(prog, ctx) returns 0 or 1,
so it seems like we don't have to reproduce the existing behavior
1-to-1?
So I'm not sure it matters, or am I missing something?
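To spell the equivalence out: for the hooks converted here, run_prog can
only return 0 or 1, and on that range masking with & 1 is a no-op, so
!(func_ret & 1) and !func_ret agree. A trivial stand-alone check
(illustrative only, not kernel code):

#include <assert.h>

int main(void)
{
        /* For the [0, 1] range enforced by check_return_code, the
         * merged check behaves exactly like the old flag-less one. */
        for (unsigned int x = 0; x <= 1; x++)
                assert(!(x & 1) == !x);
        return 0;
}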
* Re: [PATCH bpf-next] bpf: use bpf_prog_run_array_cg_flags everywhere
From: Martin KaFai Lau @ 2022-04-25 20:37 UTC
To: Stanislav Fomichev
Cc: Andrii Nakryiko, Networking, bpf, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
On Wed, Apr 20, 2022 at 03:30:43PM -0700, Stanislav Fomichev wrote:
> On Wed, Apr 20, 2022 at 3:04 PM Andrii Nakryiko
> <andrii.nakryiko@gmail.com> wrote:
> >
> > On Tue, Apr 19, 2022 at 3:23 PM Stanislav Fomichev <sdf@google.com> wrote:
> > > [...]
> > > @@ -78,8 +46,11 @@ bpf_prog_run_array_cg(const struct cgroup_bpf *cgrp,
> > > old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
> > > while ((prog = READ_ONCE(item->prog))) {
> > > run_ctx.prog_item = item;
> > > - if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
> > > + func_ret = run_prog(prog, ctx);
> > > + if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
> >
> > to be completely true to previous behavior, shouldn't there be
> >
> > if (ret_flags)
> > func_ret &= 1;
> > if (!func_ret && !IS_ERR_VALUE(...))
> >
> > here?
> >
> > This might have been discussed previously and I missed it. If that's
> > so, please ignore.
>
> We are converting the cases where run_prog(prog, ctx) returns 0 or 1,
> so it seems like we don't have to reproduce the existing behavior
> 1-to-1?
> So I'm not sure it matters, or am I missing something?
A nit: how about testing 'if (ret_flags)' first, such that it is
obvious which case will use the higher bits of the return value?
The compiler may also be able to optimize away the branch in the
ret_flags == NULL case.
Something like:
func_ret = run_prog(prog, ctx);
/* The cg bpf prog uses the higher bits of the return value */
if (ret_flags) {
*(ret_flags) |= (func_ret >> 1);
func_ret &= 1;
}
if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
run_ctx.retval = -EPERM;
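The optimization Martin mentions falls out of __always_inline: the
helper is inlined into every caller, so when a caller passes a literal
NULL for ret_flags, the 'if (ret_flags)' test is a compile-time
constant and the whole flags branch disappears. A toy illustration of
the effect, with made-up names rather than the kernel helper itself:

#include <stddef.h>

/* With forced inlining and a constant NULL argument, the compiler
 * folds the ret_flags test away, so flag-less callers pay nothing
 * for the extra parameter.
 */
static inline __attribute__((always_inline))
int run_cg(unsigned int func_ret, unsigned int *ret_flags)
{
        if (ret_flags) {                /* constant-folded when NULL */
                *ret_flags |= func_ret >> 1;
                func_ret &= 1;
        }
        return func_ret ? 0 : -1;       /* -1 stands in for -EPERM */
}

int caller_without_flags(unsigned int func_ret)
{
        /* Compiles to the same code as a helper with no flags argument. */
        return run_cg(func_ret, NULL);
}

int caller_with_flags(unsigned int func_ret, unsigned int *flags)
{
        return run_cg(func_ret, flags);
}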
* Re: [PATCH bpf-next] bpf: use bpf_prog_run_array_cg_flags everywhere
From: Stanislav Fomichev @ 2022-04-25 21:48 UTC
To: Martin KaFai Lau
Cc: Andrii Nakryiko, Networking, bpf, Alexei Starovoitov,
Daniel Borkmann, Andrii Nakryiko
On Mon, Apr 25, 2022 at 1:38 PM Martin KaFai Lau <kafai@fb.com> wrote:
>
> [...]
> A nit: how about testing 'if (ret_flags)' first, such that it is
> obvious which case will use the higher bits of the return value?
> The compiler may also be able to optimize away the branch in the
> ret_flags == NULL case.
>
> Something like:
>
> func_ret = run_prog(prog, ctx);
> /* The cg bpf prog uses the higher bits of the return value */
> if (ret_flags) {
> *(ret_flags) |= (func_ret >> 1);
> func_ret &= 1;
> }
> if (!func_ret && !IS_ERR_VALUE((long)run_ctx.retval))
> run_ctx.retval = -EPERM;
Sure, this should also address Andrii's point I think. Will resend a v2.