* [PATCH bpf-next] libbpf: Improve BPF_PROG2 macro code quality and description
@ 2022-09-10 2:52 Yonghong Song
2022-09-10 20:35 ` sdf
2022-09-22 0:10 ` patchwork-bot+netdevbpf
0 siblings, 2 replies; 4+ messages in thread
From: Yonghong Song @ 2022-09-10 2:52 UTC (permalink / raw)
To: bpf; +Cc: Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, kernel-team
Commit 34586d29f8df ("libbpf: Add new BPF_PROG2 macro") added BPF_PROG2
macro for trampoline based programs with struct arguments. Andrii
made a few suggestions to improve code quality and description.
This patch implemented these suggestions including better internal
macro name, consistent usage pattern for __builtin_choose_expr(),
simpler macro definition for always-inline func arguments and
better macro description.
Signed-off-by: Yonghong Song <yhs@fb.com>
---
tools/lib/bpf/bpf_tracing.h | 77 ++++++++++++++++++++++---------------
1 file changed, 47 insertions(+), 30 deletions(-)
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index 8d4bdd18cb3d..a71ca48ea479 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -438,39 +438,45 @@ typeof(name(0)) name(unsigned long long *ctx) \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args)
-#ifndef ____bpf_nth
-#define ____bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
+#ifndef ___bpf_nth2
+#define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \
+ _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
#endif
-#ifndef ____bpf_narg
-#define ____bpf_narg(...) ____bpf_nth(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
+#ifndef ___bpf_narg2
+#define ___bpf_narg2(...) \
+ ___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \
+ 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
#endif
-#define BPF_REG_CNT(t) \
- (__builtin_choose_expr(sizeof(t) == 1 || sizeof(t) == 2 || sizeof(t) == 4 || sizeof(t) == 8, 1, \
- __builtin_choose_expr(sizeof(t) == 16, 2, \
- (void)0)))
+#define ___bpf_reg_cnt(t) \
+ __builtin_choose_expr(sizeof(t) == 1, 1, \
+ __builtin_choose_expr(sizeof(t) == 2, 1, \
+ __builtin_choose_expr(sizeof(t) == 4, 1, \
+ __builtin_choose_expr(sizeof(t) == 8, 1, \
+ __builtin_choose_expr(sizeof(t) == 16, 2, \
+ (void)0)))))
#define ____bpf_reg_cnt0() (0)
-#define ____bpf_reg_cnt1(t, x) (____bpf_reg_cnt0() + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt2(t, x, args...) (____bpf_reg_cnt1(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt3(t, x, args...) (____bpf_reg_cnt2(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt4(t, x, args...) (____bpf_reg_cnt3(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt5(t, x, args...) (____bpf_reg_cnt4(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt6(t, x, args...) (____bpf_reg_cnt5(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt7(t, x, args...) (____bpf_reg_cnt6(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt8(t, x, args...) (____bpf_reg_cnt7(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt9(t, x, args...) (____bpf_reg_cnt8(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt10(t, x, args...) (____bpf_reg_cnt9(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt11(t, x, args...) (____bpf_reg_cnt10(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt12(t, x, args...) (____bpf_reg_cnt11(args) + BPF_REG_CNT(t))
-#define ____bpf_reg_cnt(args...) ___bpf_apply(____bpf_reg_cnt, ____bpf_narg(args))(args)
+#define ____bpf_reg_cnt1(t, x) (____bpf_reg_cnt0() + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt2(t, x, args...) (____bpf_reg_cnt1(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt3(t, x, args...) (____bpf_reg_cnt2(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt4(t, x, args...) (____bpf_reg_cnt3(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt5(t, x, args...) (____bpf_reg_cnt4(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt6(t, x, args...) (____bpf_reg_cnt5(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt7(t, x, args...) (____bpf_reg_cnt6(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt8(t, x, args...) (____bpf_reg_cnt7(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt9(t, x, args...) (____bpf_reg_cnt8(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt10(t, x, args...) (____bpf_reg_cnt9(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt11(t, x, args...) (____bpf_reg_cnt10(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt12(t, x, args...) (____bpf_reg_cnt11(args) + ___bpf_reg_cnt(t))
+#define ____bpf_reg_cnt(args...) ___bpf_apply(____bpf_reg_cnt, ___bpf_narg2(args))(args)
#define ____bpf_union_arg(t, x, n) \
- __builtin_choose_expr(sizeof(t) == 1, ({ union { struct { __u8 x; } ___z; t x; } ___tmp = { .___z = {ctx[n]}}; ___tmp.x; }), \
- __builtin_choose_expr(sizeof(t) == 2, ({ union { struct { __u16 x; } ___z; t x; } ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \
- __builtin_choose_expr(sizeof(t) == 4, ({ union { struct { __u32 x; } ___z; t x; } ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \
- __builtin_choose_expr(sizeof(t) == 8, ({ union { struct { __u64 x; } ___z; t x; } ___tmp = {.___z = {ctx[n]} }; ___tmp.x; }), \
- __builtin_choose_expr(sizeof(t) == 16, ({ union { struct { __u64 x, y; } ___z; t x; } ___tmp = {.___z = {ctx[n], ctx[n + 1]} }; ___tmp.x; }), \
+ __builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 ___z[1]; t x; } ___tmp = { .___z = {ctx[n]}}; ___tmp.x; }), \
+ __builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 ___z[1]; t x; } ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \
+ __builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 ___z[1]; t x; } ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \
+ __builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 ___z[1]; t x; } ___tmp = {.___z = {ctx[n]} }; ___tmp.x; }), \
+ __builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 ___z[2]; t x; } ___tmp = {.___z = {ctx[n], ctx[n + 1]} }; ___tmp.x; }), \
(void)0)))))
#define ____bpf_ctx_arg0(n, args...)
@@ -486,7 +492,7 @@ ____##name(unsigned long long *ctx, ##args)
#define ____bpf_ctx_arg10(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt10(t, x, args)) ____bpf_ctx_arg9(n, args)
#define ____bpf_ctx_arg11(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt11(t, x, args)) ____bpf_ctx_arg10(n, args)
#define ____bpf_ctx_arg12(n, t, x, args...) , ____bpf_union_arg(t, x, n - ____bpf_reg_cnt12(t, x, args)) ____bpf_ctx_arg11(n, args)
-#define ____bpf_ctx_arg(n, args...) ___bpf_apply(____bpf_ctx_arg, ____bpf_narg(args))(n, args)
+#define ____bpf_ctx_arg(args...) ___bpf_apply(____bpf_ctx_arg, ___bpf_narg2(args))(____bpf_reg_cnt(args), args)
#define ____bpf_ctx_decl0()
#define ____bpf_ctx_decl1(t, x) , t x
@@ -501,10 +507,21 @@ ____##name(unsigned long long *ctx, ##args)
#define ____bpf_ctx_decl10(t, x, args...) , t x ____bpf_ctx_decl9(args)
#define ____bpf_ctx_decl11(t, x, args...) , t x ____bpf_ctx_decl10(args)
#define ____bpf_ctx_decl12(t, x, args...) , t x ____bpf_ctx_decl11(args)
-#define ____bpf_ctx_decl(args...) ___bpf_apply(____bpf_ctx_decl, ____bpf_narg(args))(args)
+#define ____bpf_ctx_decl(args...) ___bpf_apply(____bpf_ctx_decl, ___bpf_narg2(args))(args)
/*
- * BPF_PROG2 can handle struct arguments.
+ * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
+ * arguments. Since each struct argument might take one or two u64 values
+ * in the trampoline stack, argument type size is needed to place proper number
+ * of u64 values for each argument. Therefore, BPF_PROG2 has different
+ * syntax from BPF_PROG. For example, for the following BPF_PROG syntax,
+ * int BPF_PROG(test2, int a, int b)
+ * the corresponding BPF_PROG2 syntax is,
+ * int BPF_PROG2(test2, int, a, int, b)
+ * where type and the corresponding argument name are separated by comma.
+ * If one or more arguments are of struct type, the BPF_PROG2 macro should be used,
+ * int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b,
+ * int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
*/
#define BPF_PROG2(name, args...) \
name(unsigned long long *ctx); \
@@ -512,7 +529,7 @@ static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx ____bpf_ctx_decl(args)); \
typeof(name(0)) name(unsigned long long *ctx) \
{ \
- return ____##name(ctx ____bpf_ctx_arg(____bpf_reg_cnt(args), args)); \
+ return ____##name(ctx ____bpf_ctx_arg(args)); \
} \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx ____bpf_ctx_decl(args))
--
2.30.2
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH bpf-next] libbpf: Improve BPF_PROG2 macro code quality and description
2022-09-10 2:52 [PATCH bpf-next] libbpf: Improve BPF_PROG2 macro code quality and description Yonghong Song
@ 2022-09-10 20:35 ` sdf
2022-09-22 0:05 ` Andrii Nakryiko
2022-09-22 0:10 ` patchwork-bot+netdevbpf
1 sibling, 1 reply; 4+ messages in thread
From: sdf @ 2022-09-10 20:35 UTC (permalink / raw)
To: Yonghong Song
Cc: bpf, Alexei Starovoitov, Andrii Nakryiko, Daniel Borkmann, kernel-team
On 09/09, Yonghong Song wrote:
> Commit 34586d29f8df ("libbpf: Add new BPF_PROG2 macro") added BPF_PROG2
> macro for trampoline based programs with struct arguments. Andrii
> made a few suggestions to improve code quality and description.
> This patch implemented these suggestions including better internal
> macro name, consistent usage pattern for __builtin_choose_expr(),
> simpler macro definition for always-inline func arguments and
> better macro description.
Not sure if Andrii wants to take a look, if not feel free to use:
Acked-by: Stanislav Fomichev <sdf@google.com>
> Signed-off-by: Yonghong Song <yhs@fb.com>
> ---
> tools/lib/bpf/bpf_tracing.h | 77 ++++++++++++++++++++++---------------
> 1 file changed, 47 insertions(+), 30 deletions(-)
> diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
> index 8d4bdd18cb3d..a71ca48ea479 100644
> --- a/tools/lib/bpf/bpf_tracing.h
> +++ b/tools/lib/bpf/bpf_tracing.h
> @@ -438,39 +438,45 @@ typeof(name(0)) name(unsigned long long *ctx)
> \
> static __always_inline typeof(name(0)) \
> ____##name(unsigned long long *ctx, ##args)
> -#ifndef ____bpf_nth
> -#define ____bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11,
> _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
> +#ifndef ___bpf_nth2
> +#define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11,
> _12, _13, \
> + _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
> #endif
> -#ifndef ____bpf_narg
> -#define ____bpf_narg(...) ____bpf_nth(_, ##__VA_ARGS__, 12, 12, 11, 11,
> 10, 10, 9, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
> +#ifndef ___bpf_narg2
> +#define ___bpf_narg2(...) \
> + ___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7,
> \
> + 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
> #endif
> -#define BPF_REG_CNT(t) \
> - (__builtin_choose_expr(sizeof(t) == 1 || sizeof(t) == 2 || sizeof(t) ==
> 4 || sizeof(t) == 8, 1, \
> - __builtin_choose_expr(sizeof(t) == 16, 2, \
> - (void)0)))
> +#define ___bpf_reg_cnt(t) \
> + __builtin_choose_expr(sizeof(t) == 1, 1, \
> + __builtin_choose_expr(sizeof(t) == 2, 1, \
> + __builtin_choose_expr(sizeof(t) == 4, 1, \
> + __builtin_choose_expr(sizeof(t) == 8, 1, \
> + __builtin_choose_expr(sizeof(t) == 16, 2, \
> + (void)0)))))
> #define ____bpf_reg_cnt0() (0)
> -#define ____bpf_reg_cnt1(t, x) (____bpf_reg_cnt0() + BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt2(t, x, args...) (____bpf_reg_cnt1(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt3(t, x, args...) (____bpf_reg_cnt2(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt4(t, x, args...) (____bpf_reg_cnt3(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt5(t, x, args...) (____bpf_reg_cnt4(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt6(t, x, args...) (____bpf_reg_cnt5(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt7(t, x, args...) (____bpf_reg_cnt6(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt8(t, x, args...) (____bpf_reg_cnt7(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt9(t, x, args...) (____bpf_reg_cnt8(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt10(t, x, args...) (____bpf_reg_cnt9(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt11(t, x, args...) (____bpf_reg_cnt10(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt12(t, x, args...) (____bpf_reg_cnt11(args) +
> BPF_REG_CNT(t))
> -#define ____bpf_reg_cnt(args...) ___bpf_apply(____bpf_reg_cnt,
> ____bpf_narg(args))(args)
> +#define ____bpf_reg_cnt1(t, x) (____bpf_reg_cnt0() + ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt2(t, x, args...) (____bpf_reg_cnt1(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt3(t, x, args...) (____bpf_reg_cnt2(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt4(t, x, args...) (____bpf_reg_cnt3(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt5(t, x, args...) (____bpf_reg_cnt4(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt6(t, x, args...) (____bpf_reg_cnt5(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt7(t, x, args...) (____bpf_reg_cnt6(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt8(t, x, args...) (____bpf_reg_cnt7(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt9(t, x, args...) (____bpf_reg_cnt8(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt10(t, x, args...) (____bpf_reg_cnt9(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt11(t, x, args...) (____bpf_reg_cnt10(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt12(t, x, args...) (____bpf_reg_cnt11(args) +
> ___bpf_reg_cnt(t))
> +#define ____bpf_reg_cnt(args...) ___bpf_apply(____bpf_reg_cnt,
> ___bpf_narg2(args))(args)
> #define ____bpf_union_arg(t, x, n) \
> - __builtin_choose_expr(sizeof(t) == 1, ({ union { struct { __u8 x; }
> ___z; t x; } ___tmp = { .___z = {ctx[n]}}; ___tmp.x; }), \
> - __builtin_choose_expr(sizeof(t) == 2, ({ union { struct { __u16 x; }
> ___z; t x; } ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \
> - __builtin_choose_expr(sizeof(t) == 4, ({ union { struct { __u32 x; }
> ___z; t x; } ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \
> - __builtin_choose_expr(sizeof(t) == 8, ({ union { struct { __u64 x; }
> ___z; t x; } ___tmp = {.___z = {ctx[n]} }; ___tmp.x; }), \
> - __builtin_choose_expr(sizeof(t) == 16, ({ union { struct { __u64 x, y;
> } ___z; t x; } ___tmp = {.___z = {ctx[n], ctx[n + 1]} }; ___tmp.x; }), \
> + __builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 ___z[1]; t x; }
> ___tmp = { .___z = {ctx[n]}}; ___tmp.x; }), \
> + __builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 ___z[1]; t x; }
> ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \
> + __builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 ___z[1]; t x; }
> ___tmp = { .___z = {ctx[n]} }; ___tmp.x; }), \
> + __builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 ___z[1]; t x; }
> ___tmp = {.___z = {ctx[n]} }; ___tmp.x; }), \
> + __builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 ___z[2]; t x; }
> ___tmp = {.___z = {ctx[n], ctx[n + 1]} }; ___tmp.x; }), \
> (void)0)))))
> #define ____bpf_ctx_arg0(n, args...)
> @@ -486,7 +492,7 @@ ____##name(unsigned long long *ctx, ##args)
> #define ____bpf_ctx_arg10(n, t, x, args...) , ____bpf_union_arg(t, x, n
> - ____bpf_reg_cnt10(t, x, args)) ____bpf_ctx_arg9(n, args)
> #define ____bpf_ctx_arg11(n, t, x, args...) , ____bpf_union_arg(t, x, n
> - ____bpf_reg_cnt11(t, x, args)) ____bpf_ctx_arg10(n, args)
> #define ____bpf_ctx_arg12(n, t, x, args...) , ____bpf_union_arg(t, x, n
> - ____bpf_reg_cnt12(t, x, args)) ____bpf_ctx_arg11(n, args)
> -#define ____bpf_ctx_arg(n, args...) ___bpf_apply(____bpf_ctx_arg,
> ____bpf_narg(args))(n, args)
> +#define ____bpf_ctx_arg(args...) ___bpf_apply(____bpf_ctx_arg,
> ___bpf_narg2(args))(____bpf_reg_cnt(args), args)
> #define ____bpf_ctx_decl0()
> #define ____bpf_ctx_decl1(t, x) , t x
> @@ -501,10 +507,21 @@ ____##name(unsigned long long *ctx, ##args)
> #define ____bpf_ctx_decl10(t, x, args...) , t x ____bpf_ctx_decl9(args)
> #define ____bpf_ctx_decl11(t, x, args...) , t x ____bpf_ctx_decl10(args)
> #define ____bpf_ctx_decl12(t, x, args...) , t x ____bpf_ctx_decl11(args)
> -#define ____bpf_ctx_decl(args...) ___bpf_apply(____bpf_ctx_decl,
> ____bpf_narg(args))(args)
> +#define ____bpf_ctx_decl(args...) ___bpf_apply(____bpf_ctx_decl,
> ___bpf_narg2(args))(args)
> /*
> - * BPF_PROG2 can handle struct arguments.
> + * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
> + * arguments. Since each struct argument might take one or two u64 values
> + * in the trampoline stack, argument type size is needed to place proper
> number
> + * of u64 values for each argument. Therefore, BPF_PROG2 has different
> + * syntax from BPF_PROG. For example, for the following BPF_PROG syntax,
> + * int BPF_PROG(test2, int a, int b)
> + * the corresponding BPF_PROG2 syntax is,
> + * int BPF_PROG2(test2, int, a, int, b)
> + * where type and the corresponding argument name are separated by comma.
> + * If one or more arguments are of struct type, the BPF_PROG2 macro should be
> used,
> + * int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a,
> int, b,
> + * int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
> */
> #define BPF_PROG2(name, args...) \
> name(unsigned long long *ctx); \
> @@ -512,7 +529,7 @@ static __always_inline typeof(name(0)) \
> ____##name(unsigned long long *ctx ____bpf_ctx_decl(args)); \
> typeof(name(0)) name(unsigned long long *ctx) \
> { \
> - return ____##name(ctx ____bpf_ctx_arg(____bpf_reg_cnt(args), args)); \
> + return ____##name(ctx ____bpf_ctx_arg(args)); \
> } \
> static __always_inline typeof(name(0)) \
> ____##name(unsigned long long *ctx ____bpf_ctx_decl(args))
> --
> 2.30.2
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH bpf-next] libbpf: Improve BPF_PROG2 macro code quality and description
2022-09-10 20:35 ` sdf
@ 2022-09-22 0:05 ` Andrii Nakryiko
0 siblings, 0 replies; 4+ messages in thread
From: Andrii Nakryiko @ 2022-09-22 0:05 UTC (permalink / raw)
To: sdf
Cc: Yonghong Song, bpf, Alexei Starovoitov, Andrii Nakryiko,
Daniel Borkmann, kernel-team
On Sat, Sep 10, 2022 at 1:35 PM <sdf@google.com> wrote:
>
> On 09/09, Yonghong Song wrote:
> > Commit 34586d29f8df ("libbpf: Add new BPF_PROG2 macro") added BPF_PROG2
> > macro for trampoline based programs with struct arguments. Andrii
> > made a few suggestions to improve code quality and description.
> > This patch implemented these suggestions including better internal
> > macro name, consistent usage pattern for __builtin_choose_expr(),
> > simpler macro definition for always-inline func arguments and
> > better macro description.
>
> Not sure if Andrii wants to take a look, if not feel free to use:
>
Andrii wanted to :) But thanks for looking and acking as well.
I've slightly reformatted the "doccomment" for BPF_PROG2 and also
removed other uses of quadruple underscore for consistency. Thanks.
Applied to bpf-next.
> Acked-by: Stanislav Fomichev <sdf@google.com>
>
> > Signed-off-by: Yonghong Song <yhs@fb.com>
> > ---
> > tools/lib/bpf/bpf_tracing.h | 77 ++++++++++++++++++++++---------------
> > 1 file changed, 47 insertions(+), 30 deletions(-)
>
> > diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
> > index 8d4bdd18cb3d..a71ca48ea479 100644
> > --- a/tools/lib/bpf/bpf_tracing.h
> > +++ b/tools/lib/bpf/bpf_tracing.h
> > @@ -438,39 +438,45 @@ typeof(name(0)) name(unsigned long long *ctx)
> > \
> > static __always_inline typeof(name(0)) \
> > ____##name(unsigned long long *ctx, ##args)
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH bpf-next] libbpf: Improve BPF_PROG2 macro code quality and description
2022-09-10 2:52 [PATCH bpf-next] libbpf: Improve BPF_PROG2 macro code quality and description Yonghong Song
2022-09-10 20:35 ` sdf
@ 2022-09-22 0:10 ` patchwork-bot+netdevbpf
1 sibling, 0 replies; 4+ messages in thread
From: patchwork-bot+netdevbpf @ 2022-09-22 0:10 UTC (permalink / raw)
To: Yonghong Song; +Cc: bpf, ast, andrii, daniel, kernel-team
Hello:
This patch was applied to bpf/bpf-next.git (master)
by Andrii Nakryiko <andrii@kernel.org>:
On Fri, 9 Sep 2022 19:52:14 -0700 you wrote:
> Commit 34586d29f8df ("libbpf: Add new BPF_PROG2 macro") added BPF_PROG2
> macro for trampoline based programs with struct arguments. Andrii
> made a few suggestions to improve code quality and description.
> This patch implemented these suggestions including better internal
> macro name, consistent usage pattern for __builtin_choose_expr(),
> simpler macro definition for always-inline func arguments and
> better macro description.
>
> [...]
Here is the summary with links:
- [bpf-next] libbpf: Improve BPF_PROG2 macro code quality and description
https://git.kernel.org/bpf/bpf-next/c/9f2f5d7830dd
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2022-09-22 0:10 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-09-10 2:52 [PATCH bpf-next] libbpf: Improve BPF_PROG2 macro code quality and description Yonghong Song
2022-09-10 20:35 ` sdf
2022-09-22 0:05 ` Andrii Nakryiko
2022-09-22 0:10 ` patchwork-bot+netdevbpf
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.