bpf.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH bpf-next v1 0/6] Typeless/weak ksym for gen_loader + misc fixups
@ 2021-10-06  0:28 Kumar Kartikeya Dwivedi
  2021-10-06  0:28 ` [PATCH bpf-next v1 1/6] bpf: Add bpf_kallsyms_lookup_name helper Kumar Kartikeya Dwivedi
                   ` (5 more replies)
  0 siblings, 6 replies; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-06  0:28 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

Patches (1,2,4) add typeless and weak ksym support to gen_loader. It is a
follow-up to the recent kfunc from modules series.

The later patches (5,6) are misc fixes for selftests, and patch 3 for libbpf,
where we try to be careful to not end up with mod_btf->fd set as 0 (as that
leads to a confusing error message about btf_id not being found on load and it
is not clear what went wrong; instead, we can just dup fd 0).

Kumar Kartikeya Dwivedi (6):
  bpf: Add bpf_kallsyms_lookup_name helper
  libbpf: Add typeless and weak ksym support to gen_loader
  libbpf: Ensure that module BTF fd is never 0
  bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test
  bpf: selftests: Fix fd cleanup in sk_lookup test
  bpf: selftests: Fix memory leak in test_ima

 include/linux/bpf.h                           |   1 +
 include/uapi/linux/bpf.h                      |  14 ++
 kernel/bpf/syscall.c                          |  24 ++++
 tools/include/uapi/linux/bpf.h                |  14 ++
 tools/lib/bpf/bpf_gen_internal.h              |  12 +-
 tools/lib/bpf/gen_loader.c                    | 123 ++++++++++++++++--
 tools/lib/bpf/libbpf.c                        |  27 ++--
 tools/testing/selftests/bpf/Makefile          |   2 +-
 .../selftests/bpf/prog_tests/ksyms_btf.c      |   6 +-
 .../bpf/prog_tests/ksyms_weak_libbpf.c        |  31 +++++
 .../selftests/bpf/prog_tests/sk_lookup.c      |  20 ++-
 .../selftests/bpf/prog_tests/test_ima.c       |   3 +-
 .../selftests/bpf/progs/test_ksyms_weak.c     |   3 +-
 13 files changed, 247 insertions(+), 33 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c

--
2.33.0


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [PATCH bpf-next v1 1/6] bpf: Add bpf_kallsyms_lookup_name helper
  2021-10-06  0:28 [PATCH bpf-next v1 0/6] Typeless/weak ksym for gen_loader + misc fixups Kumar Kartikeya Dwivedi
@ 2021-10-06  0:28 ` Kumar Kartikeya Dwivedi
  2021-10-07 20:23   ` Song Liu
  2021-10-06  0:28 ` [PATCH bpf-next v1 2/6] libbpf: Add typeless and weak ksym support to gen_loader Kumar Kartikeya Dwivedi
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-06  0:28 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

This helper allows us to get the address of a kernel symbol from inside
a BPF_PROG_TYPE_SYSCALL prog (used by gen_loader), so that we can
relocate weak and typeless ksym vars.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 include/linux/bpf.h            |  1 +
 include/uapi/linux/bpf.h       | 14 ++++++++++++++
 kernel/bpf/syscall.c           | 24 ++++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h | 14 ++++++++++++++
 4 files changed, 53 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d604c8251d88..17206aae329d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2107,6 +2107,7 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
 extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
 extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
 
 const struct bpf_func_proto *tracing_prog_func_proto(
   enum bpf_func_id func_id, const struct bpf_prog *prog);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 6fc59d61937a..bbd0a3f4e5f6 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4909,6 +4909,19 @@ union bpf_attr {
  *	Return
  *		The number of bytes written to the buffer, or a negative error
  *		in case of failure.
+ *
+ * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
+ *	Description
+ *		Get the address of a kernel symbol, returned in *res*. *res* is
+ *		set to 0 if the symbol is not found.
+ *	Return
+ *		On success, zero. On error, a negative value.
+ *
+ *		**-EINVAL** if *flags* is not zero.
+ *
+ *		**-EINVAL** if string *name* is not the same size as *name_sz*.
+ *
+ *		**-ENOENT** if symbol is not found.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5089,6 +5102,7 @@ union bpf_attr {
 	FN(task_pt_regs),		\
 	FN(get_branch_snapshot),	\
 	FN(trace_vprintk),		\
+	FN(kallsyms_lookup_name),	\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4e50c0bfdb7d..073ca9ebe58b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -4753,6 +4753,28 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
 	.arg1_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
+{
+	if (flags)
+		return -EINVAL;
+
+	if (name_sz <= 1 || name[name_sz - 1])
+		return -EINVAL;
+
+	*res = kallsyms_lookup_name(name);
+	return *res ? 0 : -ENOENT;
+}
+
+const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
+	.func		= bpf_kallsyms_lookup_name,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_MEM,
+	.arg2_type	= ARG_CONST_SIZE,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_LONG,
+};
+
 static const struct bpf_func_proto *
 syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -4763,6 +4785,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_btf_find_by_name_kind_proto;
 	case BPF_FUNC_sys_close:
 		return &bpf_sys_close_proto;
+	case BPF_FUNC_kallsyms_lookup_name:
+		return &bpf_kallsyms_lookup_name_proto;
 	default:
 		return tracing_prog_func_proto(func_id, prog);
 	}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 6fc59d61937a..bbd0a3f4e5f6 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4909,6 +4909,19 @@ union bpf_attr {
  *	Return
  *		The number of bytes written to the buffer, or a negative error
  *		in case of failure.
+ *
+ * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
+ *	Description
+ *		Get the address of a kernel symbol, returned in *res*. *res* is
+ *		set to 0 if the symbol is not found.
+ *	Return
+ *		On success, zero. On error, a negative value.
+ *
+ *		**-EINVAL** if *flags* is not zero.
+ *
+ *		**-EINVAL** if string *name* is not the same size as *name_sz*.
+ *
+ *		**-ENOENT** if symbol is not found.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5089,6 +5102,7 @@ union bpf_attr {
 	FN(task_pt_regs),		\
 	FN(get_branch_snapshot),	\
 	FN(trace_vprintk),		\
+	FN(kallsyms_lookup_name),	\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
-- 
2.33.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH bpf-next v1 2/6] libbpf: Add typeless and weak ksym support to gen_loader
  2021-10-06  0:28 [PATCH bpf-next v1 0/6] Typeless/weak ksym for gen_loader + misc fixups Kumar Kartikeya Dwivedi
  2021-10-06  0:28 ` [PATCH bpf-next v1 1/6] bpf: Add bpf_kallsyms_lookup_name helper Kumar Kartikeya Dwivedi
@ 2021-10-06  0:28 ` Kumar Kartikeya Dwivedi
  2021-10-07 21:45   ` Song Liu
  2021-10-06  0:28 ` [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0 Kumar Kartikeya Dwivedi
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-06  0:28 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

This patch adds typeless and weak ksym support to BTF_KIND_VAR
relocation code in gen_loader. For typeless ksym, we use the newly added
bpf_kallsyms_lookup_name helper.

For weak ksym, we simply skip the error check, and fix up the src_reg for
the insn, as keeping it as BPF_PSEUDO_BTF_ID for a weak ksym with its
insn[0].imm and insn[1].imm set as 0 will cause a failure. This is
consistent with how libbpf relocates these two cases of BTF_KIND_VAR.

We also modify cleanup_relos to check for typeless ksyms in fd closing
loop, since those have no fd associated with the ksym. For this we can
reuse the unused 'off' member of ksym_desc.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 tools/lib/bpf/bpf_gen_internal.h |  12 ++-
 tools/lib/bpf/gen_loader.c       | 123 ++++++++++++++++++++++++++++---
 tools/lib/bpf/libbpf.c           |  13 ++--
 3 files changed, 128 insertions(+), 20 deletions(-)

diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
index 70eccbffefb1..9f328d044636 100644
--- a/tools/lib/bpf/bpf_gen_internal.h
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -8,13 +8,19 @@ struct ksym_relo_desc {
 	int kind;
 	int insn_idx;
 	bool is_weak;
+	bool is_typeless;
 };
 
 struct ksym_desc {
 	const char *name;
 	int ref;
 	int kind;
-	int off;
+	union {
+		/* used for kfunc */
+		int off;
+		/* used for typeless ksym */
+		bool typeless;
+	};
 	int insn;
 };
 
@@ -49,7 +55,7 @@ void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_a
 void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
 void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
 void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
-void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, int kind,
-			    int insn_idx);
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
+			    bool is_typeless, int kind, int insn_idx);
 
 #endif
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 937bfc7db41e..da1fcb0e3bcb 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -13,6 +13,7 @@
 #include "hashmap.h"
 #include "bpf_gen_internal.h"
 #include "skel_internal.h"
+#include <asm/byteorder.h>
 
 #define MAX_USED_MAPS	64
 #define MAX_USED_PROGS	32
@@ -559,7 +560,7 @@ static void emit_find_attach_target(struct bpf_gen *gen)
 }
 
 void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
-			    int kind, int insn_idx)
+			    bool is_typeless, int kind, int insn_idx)
 {
 	struct ksym_relo_desc *relo;
 
@@ -572,6 +573,7 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
 	relo += gen->relo_cnt;
 	relo->name = name;
 	relo->is_weak = is_weak;
+	relo->is_typeless = is_typeless;
 	relo->kind = kind;
 	relo->insn_idx = insn_idx;
 	gen->relo_cnt++;
@@ -621,6 +623,33 @@ static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_des
 	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
 }
 
+/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
+ * Returns result in BPF_REG_7
+ * Returns u64 symbol addr in BPF_REG_9
+ */
+static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
+{
+	int name_off, len = strlen(relo->name) + 1, res_off;
+
+	name_off = add_data(gen, relo->name, len);
+	if (!name_off)
+		return;
+	res_off = add_data(gen, NULL, 8); /* res is u64 */
+	if (!res_off)
+		return;
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+					 0, 0, 0, name_off));
+	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
+					 0, 0, 0, res_off));
+	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
+	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
+	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
+	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
+}
+
 /* Expects:
  * BPF_REG_8 - pointer to instruction
  *
@@ -703,7 +732,8 @@ static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo
 /* Expects:
  * BPF_REG_8 - pointer to instruction
  */
-static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
+static void emit_relo_ksym_typeless(struct bpf_gen *gen,
+				    struct ksym_relo_desc *relo, int insn)
 {
 	struct ksym_desc *kdesc;
 
@@ -718,25 +748,96 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
 			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
 		goto log;
 	}
+	/* remember insn offset, so we can copy ksym addr later */
+	kdesc->insn = insn;
+	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
+	kdesc->typeless = true;
+	emit_bpf_kallsyms_lookup_name(gen, relo);
+	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
+	emit_check_err(gen);
+	/* store lower half of addr into insn[insn_idx].imm */
+	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
+	/* store upper half of addr into insn[insn_idx + 1].imm */
+	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
+	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
+		      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
+log:
+	if (!gen->log_level)
+		return;
+	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
+			      offsetof(struct bpf_insn, imm)));
+	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
+			      offsetof(struct bpf_insn, imm)));
+	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=0 w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
+		   relo->is_weak, relo->name, kdesc->ref);
+	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
+	debug_regs(gen, BPF_REG_9, -1, " var t=0 w=%d (%s:count=%d): insn.reg",
+		   relo->is_weak, relo->name, kdesc->ref);
+}
+
+/* Expects:
+ * BPF_REG_8 - pointer to instruction
+ */
+static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
+{
+	struct ksym_desc *kdesc;
+	__u32 reg_mask;
+
+	kdesc = get_ksym_desc(gen, relo);
+	if (!kdesc)
+		return;
+	/* try to copy from existing ldimm64 insn */
+	if (kdesc->ref > 1) {
+		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+			       kdesc->insn + offsetof(struct bpf_insn, imm));
+		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
+			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
+		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, imm)));
+		/* jump over src_reg adjustment if imm is not 0 */
+		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 3));
+		goto clear_src_reg;
+	}
 	/* remember insn offset, so we can copy BTF ID and FD later */
 	kdesc->insn = insn;
 	emit_bpf_find_by_name_kind(gen, relo);
-	emit_check_err(gen);
+	if (!relo->is_weak)
+		emit_check_err(gen);
+	/* set default values as 0 */
+	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
+	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
+	/* skip success case stores if ret < 0 */
+	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 4));
 	/* store btf_id into insn[insn_idx].imm */
 	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
 	/* store btf_obj_fd into insn[insn_idx + 1].imm */
 	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
 	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
 			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
-log:
+	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
+clear_src_reg:
+	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	reg_mask = 0x0f; /* src_reg,dst_reg,... */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	reg_mask = 0xf0; /* dst_reg,src_reg,... */
+#else
+#error "Unsupported bit endianness, cannot proceed"
+#endif
+	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
+	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
+	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
+
 	if (!gen->log_level)
 		return;
 	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
 			      offsetof(struct bpf_insn, imm)));
 	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
 			      offsetof(struct bpf_insn, imm)));
-	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var (%s:count=%d): imm: %%d, fd: %%d",
-		   relo->name, kdesc->ref);
+	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=1 w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
+		   relo->is_weak, relo->name, kdesc->ref);
+	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
+	debug_regs(gen, BPF_REG_9, -1, " var t=1 w=%d (%s:count=%d): insn.reg",
+		   relo->is_weak, relo->name, kdesc->ref);
 }
 
 static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
@@ -748,7 +849,10 @@ static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn
 	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
 	switch (relo->kind) {
 	case BTF_KIND_VAR:
-		emit_relo_ksym_btf(gen, relo, insn);
+		if (relo->is_typeless)
+			emit_relo_ksym_typeless(gen, relo, insn);
+		else
+			emit_relo_ksym_btf(gen, relo, insn);
 		break;
 	case BTF_KIND_FUNC:
 		emit_relo_kfunc_btf(gen, relo, insn);
@@ -773,12 +877,13 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
 	int i, insn;
 
 	for (i = 0; i < gen->nr_ksyms; i++) {
-		if (gen->ksyms[i].kind == BTF_KIND_VAR) {
+		/* only close fds for typed ksyms and kfuncs */
+		if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
 			/* close fd recorded in insn[insn_idx + 1].imm */
 			insn = gen->ksyms[i].insn;
 			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
 			emit_sys_close_blob(gen, insn);
-		} else { /* BTF_KIND_FUNC */
+		} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
 			emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
 			if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
 				gen->nr_fd_array--;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index f32fa51b1e63..d286dec73b5f 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -6355,17 +6355,14 @@ static int bpf_program__record_externs(struct bpf_program *prog)
 		case RELO_EXTERN_VAR:
 			if (ext->type != EXT_KSYM)
 				continue;
-			if (!ext->ksym.type_id) {
-				pr_warn("typeless ksym %s is not supported yet\n",
-					ext->name);
-				return -ENOTSUP;
-			}
-			bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
+			bpf_gen__record_extern(obj->gen_loader, ext->name,
+					       ext->is_weak, !ext->ksym.type_id,
 					       BTF_KIND_VAR, relo->insn_idx);
 			break;
 		case RELO_EXTERN_FUNC:
-			bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
-					       BTF_KIND_FUNC, relo->insn_idx);
+			bpf_gen__record_extern(obj->gen_loader, ext->name,
+					       ext->is_weak, 0, BTF_KIND_FUNC,
+					       relo->insn_idx);
 			break;
 		default:
 			continue;
-- 
2.33.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-06  0:28 [PATCH bpf-next v1 0/6] Typeless/weak ksym for gen_loader + misc fixups Kumar Kartikeya Dwivedi
  2021-10-06  0:28 ` [PATCH bpf-next v1 1/6] bpf: Add bpf_kallsyms_lookup_name helper Kumar Kartikeya Dwivedi
  2021-10-06  0:28 ` [PATCH bpf-next v1 2/6] libbpf: Add typeless and weak ksym support to gen_loader Kumar Kartikeya Dwivedi
@ 2021-10-06  0:28 ` Kumar Kartikeya Dwivedi
  2021-10-06  4:41   ` Andrii Nakryiko
  2021-10-06  0:28 ` [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test Kumar Kartikeya Dwivedi
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-06  0:28 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

Since the code assumes in various places that BTF fd for modules is
never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
fd 0 being free for allocation is usually an application error, it is
still possible that we end up getting fd 0 if the application explicitly
closes its stdin. Deal with this by getting a new fd using dup and
closing fd 0.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 tools/lib/bpf/libbpf.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index d286dec73b5f..3e5e460fe63e 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
 			pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
 			return err;
 		}
+		/* Make sure module BTF fd is never 0, as kernel depends on it
+		 * being > 0 to distinguish between vmlinux and module BTFs,
+		 * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
+		 */
+		if (!fd) {
+			fd = dup(0);
+			if (fd < 0) {
+				err = -errno;
+				pr_warn("failed to dup BTF object #%d FD 0 to FD > 0: %d\n", id, err);
+				close(0);
+				return err;
+			}
+			close(0);
+		}
 
 		len = sizeof(info);
 		memset(&info, 0, sizeof(info));
-- 
2.33.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test
  2021-10-06  0:28 [PATCH bpf-next v1 0/6] Typeless/weak ksym for gen_loader + misc fixups Kumar Kartikeya Dwivedi
                   ` (2 preceding siblings ...)
  2021-10-06  0:28 ` [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0 Kumar Kartikeya Dwivedi
@ 2021-10-06  0:28 ` Kumar Kartikeya Dwivedi
  2021-10-07 20:33   ` Song Liu
  2021-10-06  0:28 ` [PATCH bpf-next v1 5/6] bpf: selftests: Fix fd cleanup in sk_lookup test Kumar Kartikeya Dwivedi
  2021-10-06  0:28 ` [PATCH bpf-next v1 6/6] bpf: selftests: Fix memory leak in test_ima Kumar Kartikeya Dwivedi
  5 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-06  0:28 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

Also, avoid using CO-RE features, as lskel doesn't support CO-RE, yet.
Create a file for testing libbpf skeleton as well, so that both
gen_loader and libbpf get tested.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 tools/testing/selftests/bpf/Makefile          |  2 +-
 .../selftests/bpf/prog_tests/ksyms_btf.c      |  6 ++--
 .../bpf/prog_tests/ksyms_weak_libbpf.c        | 31 +++++++++++++++++++
 .../selftests/bpf/progs/test_ksyms_weak.c     |  3 +-
 4 files changed, 36 insertions(+), 6 deletions(-)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index c5c9a9f50d8d..4ae5bc852ca8 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -317,7 +317,7 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h		\
 LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
 	test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c
 # Generate both light skeleton and libbpf skeleton for these
-LSKELS_EXTRA := test_ksyms_module.c
+LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c
 SKEL_BLACKLIST += $$(LSKELS)
 
 test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
index cf3acfa5a91d..21ff02d47076 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
@@ -6,7 +6,7 @@
 #include <bpf/btf.h>
 #include "test_ksyms_btf.skel.h"
 #include "test_ksyms_btf_null_check.skel.h"
-#include "test_ksyms_weak.skel.h"
+#include "test_ksyms_weak.lskel.h"
 
 static int duration;
 
@@ -89,11 +89,11 @@ static void test_weak_syms(void)
 	int err;
 
 	skel = test_ksyms_weak__open_and_load();
-	if (CHECK(!skel, "test_ksyms_weak__open_and_load", "failed\n"))
+	if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
 		return;
 
 	err = test_ksyms_weak__attach(skel);
-	if (CHECK(err, "test_ksyms_weak__attach", "skeleton attach failed: %d\n", err))
+	if (!ASSERT_OK(err, "test_ksyms_weak__attach"))
 		goto cleanup;
 
 	/* trigger tracepoint */
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c
new file mode 100644
index 000000000000..b75725e28647
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "test_ksyms_weak.skel.h"
+
+void test_ksyms_weak_libbpf(void)
+{
+	struct test_ksyms_weak *skel;
+	struct test_ksyms_weak__data *data;
+	int err;
+
+	skel = test_ksyms_weak__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
+		return;
+
+	err = test_ksyms_weak__attach(skel);
+	if (!ASSERT_OK(err, "test_ksyms_weak__attach"))
+		goto cleanup;
+
+	/* trigger tracepoint */
+	usleep(1);
+
+	data = skel->data;
+	ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
+	ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
+	ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
+	ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");
+
+cleanup:
+	test_ksyms_weak__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
index 5f8379aadb29..521e7b99db08 100644
--- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
@@ -21,7 +21,6 @@ __u64 out__non_existent_typed = -1;
 extern const struct rq runqueues __ksym __weak; /* typed */
 extern const void bpf_prog_active __ksym __weak; /* typeless */
 
-
 /* non-existent weak symbols. */
 
 /* typeless symbols, default to zero. */
@@ -38,7 +37,7 @@ int pass_handler(const void *ctx)
 	/* tests existing symbols. */
 	rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
 	if (rq)
-		out__existing_typed = rq->cpu;
+		out__existing_typed = 0;
 	out__existing_typeless = (__u64)&bpf_prog_active;
 
 	/* tests non-existent symbols. */
-- 
2.33.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH bpf-next v1 5/6] bpf: selftests: Fix fd cleanup in sk_lookup test
  2021-10-06  0:28 [PATCH bpf-next v1 0/6] Typeless/weak ksym for gen_loader + misc fixups Kumar Kartikeya Dwivedi
                   ` (3 preceding siblings ...)
  2021-10-06  0:28 ` [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test Kumar Kartikeya Dwivedi
@ 2021-10-06  0:28 ` Kumar Kartikeya Dwivedi
  2021-10-06  6:49   ` Jakub Sitnicki
  2021-10-06  0:28 ` [PATCH bpf-next v1 6/6] bpf: selftests: Fix memory leak in test_ima Kumar Kartikeya Dwivedi
  5 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-06  0:28 UTC (permalink / raw)
  To: bpf
  Cc: Jakub Sitnicki, Alexei Starovoitov, Daniel Borkmann,
	Andrii Nakryiko, Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

Similar to the fix in commit:
e31eec77e4ab (bpf: selftests: Fix fd cleanup in get_branch_snapshot)

We use memset to set fds to -1 without breaking on future changes to
the array size (when MAX_SERVER constant is bumped).

The particular close(0) occurs on non-reuseport tests, so it can be seen
with -n 115/{2,3} but not 115/4. This can cause problems with future
tests if they depend on BTF fd never being acquired as fd 0, breaking
internal libbpf assumptions.

Cc: Jakub Sitnicki <jakub@cloudflare.com>
Fixes: 0ab5539f8584 (selftests/bpf: Tests for BPF_SK_LOOKUP attach point)
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 .../selftests/bpf/prog_tests/sk_lookup.c      | 20 +++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
index aee41547e7f4..572220065bdf 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
@@ -598,11 +598,14 @@ static void query_lookup_prog(struct test_sk_lookup *skel)
 
 static void run_lookup_prog(const struct test *t)
 {
-	int server_fds[MAX_SERVERS] = { -1 };
 	int client_fd, reuse_conn_fd = -1;
 	struct bpf_link *lookup_link;
+	int server_fds[MAX_SERVERS];
 	int i, err;
 
+	/* set all fds to -1 */
+	memset(server_fds, 0xFF, sizeof(server_fds));
+
 	lookup_link = attach_lookup_prog(t->lookup_prog);
 	if (!lookup_link)
 		return;
@@ -663,8 +666,9 @@ static void run_lookup_prog(const struct test *t)
 	if (reuse_conn_fd != -1)
 		close(reuse_conn_fd);
 	for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
-		if (server_fds[i] != -1)
-			close(server_fds[i]);
+		if (server_fds[i] == -1)
+			break;
+		close(server_fds[i]);
 	}
 	bpf_link__destroy(lookup_link);
 }
@@ -1053,11 +1057,14 @@ static void run_sk_assign(struct test_sk_lookup *skel,
 			  struct bpf_program *lookup_prog,
 			  const char *remote_ip, const char *local_ip)
 {
-	int server_fds[MAX_SERVERS] = { -1 };
+	int server_fds[MAX_SERVERS];
 	struct bpf_sk_lookup ctx;
 	__u64 server_cookie;
 	int i, err;
 
+	/* set all fds to -1 */
+	memset(server_fds, 0xFF, sizeof(server_fds));
+
 	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 		.ctx_in = &ctx,
 		.ctx_size_in = sizeof(ctx),
@@ -1097,8 +1104,9 @@ static void run_sk_assign(struct test_sk_lookup *skel,
 
 close_servers:
 	for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
-		if (server_fds[i] != -1)
-			close(server_fds[i]);
+		if (server_fds[i] == -1)
+			break;
+		close(server_fds[i]);
 	}
 }
 
-- 
2.33.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* [PATCH bpf-next v1 6/6] bpf: selftests: Fix memory leak in test_ima
  2021-10-06  0:28 [PATCH bpf-next v1 0/6] Typeless/weak ksym for gen_loader + misc fixups Kumar Kartikeya Dwivedi
                   ` (4 preceding siblings ...)
  2021-10-06  0:28 ` [PATCH bpf-next v1 5/6] bpf: selftests: Fix fd cleanup in sk_lookup test Kumar Kartikeya Dwivedi
@ 2021-10-06  0:28 ` Kumar Kartikeya Dwivedi
  2021-10-06  4:44   ` Andrii Nakryiko
  5 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-06  0:28 UTC (permalink / raw)
  To: bpf
  Cc: Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

The allocated ring buffer is never freed, do so in the cleanup path.

Fixes: f446b570ac7e (bpf/selftests: Update the IMA test to use BPF ring buffer)
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 tools/testing/selftests/bpf/prog_tests/test_ima.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c
index 0252f61d611a..97d8a6f84f4a 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_ima.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c
@@ -43,7 +43,7 @@ static int process_sample(void *ctx, void *data, size_t len)
 void test_test_ima(void)
 {
 	char measured_dir_template[] = "/tmp/ima_measuredXXXXXX";
-	struct ring_buffer *ringbuf;
+	struct ring_buffer *ringbuf = NULL;
 	const char *measured_dir;
 	char cmd[256];
 
@@ -85,5 +85,6 @@ void test_test_ima(void)
 	err = system(cmd);
 	CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno);
 close_prog:
+	ring_buffer__free(ringbuf);
 	ima__destroy(skel);
 }
-- 
2.33.0


^ permalink raw reply related	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-06  0:28 ` [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0 Kumar Kartikeya Dwivedi
@ 2021-10-06  4:41   ` Andrii Nakryiko
  2021-10-06  5:24     ` Kumar Kartikeya Dwivedi
  0 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2021-10-06  4:41 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> Since the code assumes in various places that BTF fd for modules is
> never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
> fd 0 being free for allocation is usually an application error, it is
> still possible that we end up getting fd 0 if the application explicitly
> closes its stdin. Deal with this by getting a new fd using dup and
> closing fd 0.
>
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> ---
>  tools/lib/bpf/libbpf.c | 14 ++++++++++++++
>  1 file changed, 14 insertions(+)
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index d286dec73b5f..3e5e460fe63e 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
>                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
>                         return err;
>                 }
> +               /* Make sure module BTF fd is never 0, as kernel depends on it
> +                * being > 0 to distinguish between vmlinux and module BTFs,
> +                * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
> +                */
> +               if (!fd) {
> +                       fd = dup(0);

This is not the only place where we make assumptions that fd > 0 but
technically can get fd == 0. Instead of doing such a check in every
such place, would it be possible to open (cheaply) some FD (/dev/null
or whatever, don't know what's the best file to open), if we detect
that FD == 0 is not allocated? Can we detect that fd 0 is not
allocated?

Doing something like that in bpf_object__open() or bpf_object__load()
would make everything much simpler and we'll have a guarantee that fd
== 0 is not going to be allocated (unless someone accidentally or not
accidentally does close(0), but that's entirely different story).

> +                       if (fd < 0) {
> +                               err = -errno;
> +                               pr_warn("failed to dup BTF object #%d FD 0 to FD > 0: %d\n", id, err);
> +                               close(0);
> +                               return err;
> +                       }
> +                       close(0);
> +               }
>
>                 len = sizeof(info);
>                 memset(&info, 0, sizeof(info));
> --
> 2.33.0
>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 6/6] bpf: selftests: Fix memory leak in test_ima
  2021-10-06  0:28 ` [PATCH bpf-next v1 6/6] bpf: selftests: Fix memory leak in test_ima Kumar Kartikeya Dwivedi
@ 2021-10-06  4:44   ` Andrii Nakryiko
  2021-10-07 21:48     ` Song Liu
  0 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2021-10-06  4:44 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> The allocated ring buffer is never freed, do so in the cleanup path.
>
> Fixes: f446b570ac7e (bpf/selftests: Update the IMA test to use BPF ring buffer)
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> ---

Please stick to "selftests/bpf: " prefix which we use consistently for
BPF selftests patches.

Other than that LGTM.

Acked-by: Andrii Nakryiko <andrii@kernel.org>

>  tools/testing/selftests/bpf/prog_tests/test_ima.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c
> index 0252f61d611a..97d8a6f84f4a 100644
> --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c
> +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c
> @@ -43,7 +43,7 @@ static int process_sample(void *ctx, void *data, size_t len)
>  void test_test_ima(void)
>  {
>         char measured_dir_template[] = "/tmp/ima_measuredXXXXXX";
> -       struct ring_buffer *ringbuf;
> +       struct ring_buffer *ringbuf = NULL;
>         const char *measured_dir;
>         char cmd[256];
>
> @@ -85,5 +85,6 @@ void test_test_ima(void)
>         err = system(cmd);
>         CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno);
>  close_prog:
> +       ring_buffer__free(ringbuf);
>         ima__destroy(skel);
>  }
> --
> 2.33.0
>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-06  4:41   ` Andrii Nakryiko
@ 2021-10-06  5:24     ` Kumar Kartikeya Dwivedi
  2021-10-06 16:43       ` Andrii Nakryiko
  0 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-06  5:24 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Wed, Oct 06, 2021 at 10:11:29AM IST, Andrii Nakryiko wrote:
> On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> >
> > Since the code assumes in various places that BTF fd for modules is
> > never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
> > fd 0 being free for allocation is usually an application error, it is
> > still possible that we end up getting fd 0 if the application explicitly
> > closes its stdin. Deal with this by getting a new fd using dup and
> > closing fd 0.
> >
> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > ---
> >  tools/lib/bpf/libbpf.c | 14 ++++++++++++++
> >  1 file changed, 14 insertions(+)
> >
> > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > index d286dec73b5f..3e5e460fe63e 100644
> > --- a/tools/lib/bpf/libbpf.c
> > +++ b/tools/lib/bpf/libbpf.c
> > @@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
> >                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
> >                         return err;
> >                 }
> > +               /* Make sure module BTF fd is never 0, as kernel depends on it
> > +                * being > 0 to distinguish between vmlinux and module BTFs,
> > +                * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
> > +                */
> > +               if (!fd) {
> > +                       fd = dup(0);
>
> This is not the only place where we make assumptions that fd > 0 but
> technically can get fd == 0. Instead of doing such a check in every
> such place, would it be possible to open (cheaply) some FD (/dev/null
> or whatever, don't know what's the best file to open), if we detect
> that FD == 0 is not allocated? Can we detect that fd 0 is not
> allocated?
>

We can, e.g. using access("/proc/self/fd/0", F_OK), but I think just calling
open unconditionally and doing if (ret > 0) close(ret) is better. Also, do I
leave it lingering, or should I close(0) if we created it on destroy?

> Doing something like that in bpf_object__open() or bpf_object__load()
> would make everything much simpler and we'll have a guarantee that fd
> == 0 is not going to be allocated (unless someone accidentally or not
> accidentally does close(0), but that's entirely different story).
>
> > +                       if (fd < 0) {
> > +                               err = -errno;
> > +                               pr_warn("failed to dup BTF object #%d FD 0 to FD > 0: %d\n", id, err);
> > +                               close(0);
> > +                               return err;
> > +                       }
> > +                       close(0);
> > +               }
> >
> >                 len = sizeof(info);
> >                 memset(&info, 0, sizeof(info));
> > --
> > 2.33.0
> >

--
Kartikeya

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 5/6] bpf: selftests: Fix fd cleanup in sk_lookup test
  2021-10-06  0:28 ` [PATCH bpf-next v1 5/6] bpf: selftests: Fix fd cleanup in sk_lookup test Kumar Kartikeya Dwivedi
@ 2021-10-06  6:49   ` Jakub Sitnicki
  2021-10-07 21:48     ` Song Liu
  0 siblings, 1 reply; 27+ messages in thread
From: Jakub Sitnicki @ 2021-10-06  6:49 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen, netdev

On Wed, Oct 06, 2021 at 02:28 AM CEST, Kumar Kartikeya Dwivedi wrote:
> Similar to the fix in commit:
> e31eec77e4ab (bpf: selftests: Fix fd cleanup in get_branch_snapshot)
>
> We use memset to set fds to -1 without breaking on future changes to
> the array size (when MAX_SERVERS constant is bumped).
>
> The particular close(0) occurs on non-reuseport tests, so it can be seen
> with -n 115/{2,3} but not 115/4. This can cause problems with future
> tests if they depend on BTF fd never being acquired as fd 0, breaking
> internal libbpf assumptions.
>
> Cc: Jakub Sitnicki <jakub@cloudflare.com>
> Fixes: 0ab5539f8584 (selftests/bpf: Tests for BPF_SK_LOOKUP attach point)
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> ---
>  .../selftests/bpf/prog_tests/sk_lookup.c      | 20 +++++++++++++------
>  1 file changed, 14 insertions(+), 6 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
> index aee41547e7f4..572220065bdf 100644
> --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
> +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
> @@ -598,11 +598,14 @@ static void query_lookup_prog(struct test_sk_lookup *skel)
>
>  static void run_lookup_prog(const struct test *t)
>  {
> -	int server_fds[MAX_SERVERS] = { -1 };
>  	int client_fd, reuse_conn_fd = -1;
>  	struct bpf_link *lookup_link;
> +	int server_fds[MAX_SERVERS];
>  	int i, err;
>
> +	/* set all fds to -1 */
> +	memset(server_fds, 0xFF, sizeof(server_fds));
> +
>  	lookup_link = attach_lookup_prog(t->lookup_prog);
>  	if (!lookup_link)
>  		return;
> @@ -663,8 +666,9 @@ static void run_lookup_prog(const struct test *t)
>  	if (reuse_conn_fd != -1)
>  		close(reuse_conn_fd);
>  	for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
> -		if (server_fds[i] != -1)
> -			close(server_fds[i]);
> +		if (server_fds[i] == -1)
> +			break;
> +		close(server_fds[i]);
>  	}
>  	bpf_link__destroy(lookup_link);
>  }
> @@ -1053,11 +1057,14 @@ static void run_sk_assign(struct test_sk_lookup *skel,
>  			  struct bpf_program *lookup_prog,
>  			  const char *remote_ip, const char *local_ip)
>  {
> -	int server_fds[MAX_SERVERS] = { -1 };
> +	int server_fds[MAX_SERVERS];
>  	struct bpf_sk_lookup ctx;
>  	__u64 server_cookie;
>  	int i, err;
>
> +	/* set all fds to -1 */
> +	memset(server_fds, 0xFF, sizeof(server_fds));
> +
>  	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
>  		.ctx_in = &ctx,
>  		.ctx_size_in = sizeof(ctx),
> @@ -1097,8 +1104,9 @@ static void run_sk_assign(struct test_sk_lookup *skel,
>
>  close_servers:
>  	for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
> -		if (server_fds[i] != -1)
> -			close(server_fds[i]);
> +		if (server_fds[i] == -1)
> +			break;
> +		close(server_fds[i]);
>  	}
>  }

My mistake. Thanks for the fix.

Alternatively we can use an extended designated initializer:

int server_fds[] = { [0 ... MAX_SERVERS] = -1 };

Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-06  5:24     ` Kumar Kartikeya Dwivedi
@ 2021-10-06 16:43       ` Andrii Nakryiko
  2021-10-06 19:09         ` Alexei Starovoitov
  0 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2021-10-06 16:43 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Tue, Oct 5, 2021 at 10:24 PM Kumar Kartikeya Dwivedi
<memxor@gmail.com> wrote:
>
> On Wed, Oct 06, 2021 at 10:11:29AM IST, Andrii Nakryiko wrote:
> > On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> > >
> > > Since the code assumes in various places that BTF fd for modules is
> > > never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
> > > fd 0 being free for allocation is usually an application error, it is
> > > still possible that we end up getting fd 0 if the application explicitly
> > > closes its stdin. Deal with this by getting a new fd using dup and
> > > closing fd 0.
> > >
> > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > > ---
> > >  tools/lib/bpf/libbpf.c | 14 ++++++++++++++
> > >  1 file changed, 14 insertions(+)
> > >
> > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > > index d286dec73b5f..3e5e460fe63e 100644
> > > --- a/tools/lib/bpf/libbpf.c
> > > +++ b/tools/lib/bpf/libbpf.c
> > > @@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
> > >                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
> > >                         return err;
> > >                 }
> > > +               /* Make sure module BTF fd is never 0, as kernel depends on it
> > > +                * being > 0 to distinguish between vmlinux and module BTFs,
> > > +                * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
> > > +                */
> > > +               if (!fd) {
> > > +                       fd = dup(0);
> >
> > This is not the only place where we make assumptions that fd > 0 but
> > technically can get fd == 0. Instead of doing such a check in every
> > such place, would it be possible to open (cheaply) some FD (/dev/null
> > or whatever, don't know what's the best file to open), if we detect
> > that FD == 0 is not allocated? Can we detect that fd 0 is not
> > allocated?
> >
>
> We can, e.g. using access("/proc/self/fd/0", F_OK), but I think just calling
> open unconditonally and doing if (ret > 0) close(ret) is better. Also, do I

yeah, I like this idea, let's go with it

> leave it lingering, or should I close(0) if we created it on destroy?

I don't mind leaving it open indefinitely, but can you please check
that it doesn't trigger LeakSanitizer errors?

>
> > Doing something like that in bpf_object__open() or bpf_object__load()
> > would make everything much simpler and we'll have a guarantee that fd
> > == 0 is not going to be allocated (unless someone accidentally or not
> > accidentally does close(0), but that's entirely different story).
> >
> > > +                       if (fd < 0) {
> > > +                               err = -errno;
> > > +                               pr_warn("failed to dup BTF object #%d FD 0 to FD > 0: %d\n", id, err);
> > > +                               close(0);
> > > +                               return err;
> > > +                       }
> > > +                       close(0);
> > > +               }
> > >
> > >                 len = sizeof(info);
> > >                 memset(&info, 0, sizeof(info));
> > > --
> > > 2.33.0
> > >
>
> --
> Kartikeya

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-06 16:43       ` Andrii Nakryiko
@ 2021-10-06 19:09         ` Alexei Starovoitov
  2021-10-06 19:38           ` Andrii Nakryiko
  0 siblings, 1 reply; 27+ messages in thread
From: Alexei Starovoitov @ 2021-10-06 19:09 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: Kumar Kartikeya Dwivedi, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, Jesper Dangaard Brouer,
	Toke Høiland-Jørgensen, Networking

On Wed, Oct 6, 2021 at 9:43 AM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Tue, Oct 5, 2021 at 10:24 PM Kumar Kartikeya Dwivedi
> <memxor@gmail.com> wrote:
> >
> > On Wed, Oct 06, 2021 at 10:11:29AM IST, Andrii Nakryiko wrote:
> > > On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> > > >
> > > > Since the code assumes in various places that BTF fd for modules is
> > > > never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
> > > > fd 0 being free for allocation is usually an application error, it is
> > > > still possible that we end up getting fd 0 if the application explicitly
> > > > closes its stdin. Deal with this by getting a new fd using dup and
> > > > closing fd 0.
> > > >
> > > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > > > ---
> > > >  tools/lib/bpf/libbpf.c | 14 ++++++++++++++
> > > >  1 file changed, 14 insertions(+)
> > > >
> > > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > > > index d286dec73b5f..3e5e460fe63e 100644
> > > > --- a/tools/lib/bpf/libbpf.c
> > > > +++ b/tools/lib/bpf/libbpf.c
> > > > @@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
> > > >                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
> > > >                         return err;
> > > >                 }
> > > > +               /* Make sure module BTF fd is never 0, as kernel depends on it
> > > > +                * being > 0 to distinguish between vmlinux and module BTFs,
> > > > +                * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
> > > > +                */
> > > > +               if (!fd) {
> > > > +                       fd = dup(0);
> > >
> > > This is not the only place where we make assumptions that fd > 0 but
> > > technically can get fd == 0. Instead of doing such a check in every
> > > such place, would it be possible to open (cheaply) some FD (/dev/null
> > > or whatever, don't know what's the best file to open), if we detect
> > > that FD == 0 is not allocated? Can we detect that fd 0 is not
> > > allocated?
> > >
> >
> > We can, e.g. using access("/proc/self/fd/0", F_OK), but I think just calling
> > open unconditonally and doing if (ret > 0) close(ret) is better. Also, do I
>
> yeah, I like this idea, let's go with it

FYI some production environments may detect that FDs 0,1,2 are not
pointing to stdin, stdout, stderr and will force close whatever files are there
and open 0,1,2 with canonical files.

libbpf doesn't have to resort to such measures, but it would be prudent to
make libbpf operate on FDs > 2 for all bpf objects to make sure other
frameworks don't ruin libbpf's view of FDs.

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-06 19:09         ` Alexei Starovoitov
@ 2021-10-06 19:38           ` Andrii Nakryiko
  2021-10-07 10:24             ` Toke Høiland-Jørgensen
  0 siblings, 1 reply; 27+ messages in thread
From: Andrii Nakryiko @ 2021-10-06 19:38 UTC (permalink / raw)
  To: Alexei Starovoitov
  Cc: Kumar Kartikeya Dwivedi, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, Jesper Dangaard Brouer,
	Toke Høiland-Jørgensen, Networking

On Wed, Oct 6, 2021 at 12:09 PM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Wed, Oct 6, 2021 at 9:43 AM Andrii Nakryiko
> <andrii.nakryiko@gmail.com> wrote:
> >
> > On Tue, Oct 5, 2021 at 10:24 PM Kumar Kartikeya Dwivedi
> > <memxor@gmail.com> wrote:
> > >
> > > On Wed, Oct 06, 2021 at 10:11:29AM IST, Andrii Nakryiko wrote:
> > > > On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> > > > >
> > > > > Since the code assumes in various places that BTF fd for modules is
> > > > > never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
> > > > > fd 0 being free for allocation is usually an application error, it is
> > > > > still possible that we end up getting fd 0 if the application explicitly
> > > > > closes its stdin. Deal with this by getting a new fd using dup and
> > > > > closing fd 0.
> > > > >
> > > > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > > > > ---
> > > > >  tools/lib/bpf/libbpf.c | 14 ++++++++++++++
> > > > >  1 file changed, 14 insertions(+)
> > > > >
> > > > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > > > > index d286dec73b5f..3e5e460fe63e 100644
> > > > > --- a/tools/lib/bpf/libbpf.c
> > > > > +++ b/tools/lib/bpf/libbpf.c
> > > > > @@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
> > > > >                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
> > > > >                         return err;
> > > > >                 }
> > > > > +               /* Make sure module BTF fd is never 0, as kernel depends on it
> > > > > +                * being > 0 to distinguish between vmlinux and module BTFs,
> > > > > +                * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
> > > > > +                */
> > > > > +               if (!fd) {
> > > > > +                       fd = dup(0);
> > > >
> > > > This is not the only place where we make assumptions that fd > 0 but
> > > > technically can get fd == 0. Instead of doing such a check in every
> > > > such place, would it be possible to open (cheaply) some FD (/dev/null
> > > > or whatever, don't know what's the best file to open), if we detect
> > > > that FD == 0 is not allocated? Can we detect that fd 0 is not
> > > > allocated?
> > > >
> > >
> > > We can, e.g. using access("/proc/self/fd/0", F_OK), but I think just calling
> > > open unconditonally and doing if (ret > 0) close(ret) is better. Also, do I
> >
> > yeah, I like this idea, let's go with it
>
> FYI some production environments may detect that FDs 0,1,2 are not
> pointing to stdin, stdout, stderr and will force close whatever files are there
> and open 0,1,2 with canonical files.
>
> libbpf doesn't have to resort to such measures, but it would be prudent to
> make libbpf operate on FDs > 2 for all bpf objects to make sure other
> frameworks don't ruin libbpf's view of FDs.

oh well, even without those production complications this would be a
bit fragile, e.g., if the application temporarily opened FD 0 and then
closed it.

Ok, Kumar, can you please do it as a simple helper that would
dup()'ing until we have FD>2, and use it in as few places as possible
to make sure that all FDs (not just module BTF) are covered. I'd
suggest doing that only in low-level helpers in btf.c, I think
libbpf's logic always goes through those anyways (but please
double-check that we don't call bpf syscall directly anywhere else).

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-06 19:38           ` Andrii Nakryiko
@ 2021-10-07 10:24             ` Toke Høiland-Jørgensen
  2021-10-07 18:44               ` Kumar Kartikeya Dwivedi
  0 siblings, 1 reply; 27+ messages in thread
From: Toke Høiland-Jørgensen @ 2021-10-07 10:24 UTC (permalink / raw)
  To: Andrii Nakryiko, Alexei Starovoitov
  Cc: Kumar Kartikeya Dwivedi, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, Jesper Dangaard Brouer, Networking

Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:

> On Wed, Oct 6, 2021 at 12:09 PM Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
>>
>> On Wed, Oct 6, 2021 at 9:43 AM Andrii Nakryiko
>> <andrii.nakryiko@gmail.com> wrote:
>> >
>> > On Tue, Oct 5, 2021 at 10:24 PM Kumar Kartikeya Dwivedi
>> > <memxor@gmail.com> wrote:
>> > >
>> > > On Wed, Oct 06, 2021 at 10:11:29AM IST, Andrii Nakryiko wrote:
>> > > > On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>> > > > >
>> > > > > Since the code assumes in various places that BTF fd for modules is
>> > > > > never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
>> > > > > fd 0 being free for allocation is usually an application error, it is
>> > > > > still possible that we end up getting fd 0 if the application explicitly
>> > > > > closes its stdin. Deal with this by getting a new fd using dup and
>> > > > > closing fd 0.
>> > > > >
>> > > > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
>> > > > > ---
>> > > > >  tools/lib/bpf/libbpf.c | 14 ++++++++++++++
>> > > > >  1 file changed, 14 insertions(+)
>> > > > >
>> > > > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
>> > > > > index d286dec73b5f..3e5e460fe63e 100644
>> > > > > --- a/tools/lib/bpf/libbpf.c
>> > > > > +++ b/tools/lib/bpf/libbpf.c
>> > > > > @@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
>> > > > >                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
>> > > > >                         return err;
>> > > > >                 }
>> > > > > +               /* Make sure module BTF fd is never 0, as kernel depends on it
>> > > > > +                * being > 0 to distinguish between vmlinux and module BTFs,
>> > > > > +                * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
>> > > > > +                */
>> > > > > +               if (!fd) {
>> > > > > +                       fd = dup(0);
>> > > >
>> > > > This is not the only place where we make assumptions that fd > 0 but
>> > > > technically can get fd == 0. Instead of doing such a check in every
>> > > > such place, would it be possible to open (cheaply) some FD (/dev/null
>> > > > or whatever, don't know what's the best file to open), if we detect
>> > > > that FD == 0 is not allocated? Can we detect that fd 0 is not
>> > > > allocated?
>> > > >
>> > >
>> > > We can, e.g. using access("/proc/self/fd/0", F_OK), but I think just calling
>> > > open unconditonally and doing if (ret > 0) close(ret) is better. Also, do I
>> >
>> > yeah, I like this idea, let's go with it
>>
>> FYI some production environments may detect that FDs 0,1,2 are not
>> pointing to stdin, stdout, stderr and will force close whatever files are there
>> and open 0,1,2 with canonical files.
>>
>> libbpf doesn't have to resort to such measures, but it would be prudent to
>> make libbpf operate on FDs > 2 for all bpf objects to make sure other
>> frameworks don't ruin libbpf's view of FDs.
>
> oh well, even without those production complications this would be a
> bit fragile, e.g., if the application temporarily opened FD 0 and then
> closed it.
>
> Ok, Kumar, can you please do it as a simple helper that would
> dup()'ing until we have FD>2, and use it in as few places as possible
> to make sure that all FDs (not just module BTF) are covered. I'd
> suggest doing that only in low-level helpers in btf.c, I think
> libbpf's logic always goes through those anyways (but please
> double-check that we don't call bpf syscall directly anywhere else).

FYI, you can use fcntl() with F_DUPFD{,_CLOEXEC} and tell it the minimum
fd number you're interested in for the clone. We do that in libxdp to
protect against fd 0:

https://github.com/xdp-project/xdp-tools/blob/master/lib/libxdp/libxdp.c#L1184

Given Alexei's comments above, maybe we should use '3' for the last arg
instead of 1...

-Toke


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-07 10:24             ` Toke Høiland-Jørgensen
@ 2021-10-07 18:44               ` Kumar Kartikeya Dwivedi
  2021-10-07 21:29                 ` Andrii Nakryiko
  0 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-07 18:44 UTC (permalink / raw)
  To: Toke Høiland-Jørgensen
  Cc: Andrii Nakryiko, Alexei Starovoitov, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, Jesper Dangaard Brouer, Networking

On Thu, Oct 07, 2021 at 03:54:34PM IST, Toke Høiland-Jørgensen wrote:
> Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:
>
> > On Wed, Oct 6, 2021 at 12:09 PM Alexei Starovoitov
> > <alexei.starovoitov@gmail.com> wrote:
> >>
> >> On Wed, Oct 6, 2021 at 9:43 AM Andrii Nakryiko
> >> <andrii.nakryiko@gmail.com> wrote:
> >> >
> >> > On Tue, Oct 5, 2021 at 10:24 PM Kumar Kartikeya Dwivedi
> >> > <memxor@gmail.com> wrote:
> >> > >
> >> > > On Wed, Oct 06, 2021 at 10:11:29AM IST, Andrii Nakryiko wrote:
> >> > > > On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> >> > > > >
> >> > > > > Since the code assumes in various places that BTF fd for modules is
> >> > > > > never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
> >> > > > > fd 0 being free for allocation is usually an application error, it is
> >> > > > > still possible that we end up getting fd 0 if the application explicitly
> >> > > > > closes its stdin. Deal with this by getting a new fd using dup and
> >> > > > > closing fd 0.
> >> > > > >
> >> > > > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> >> > > > > ---
> >> > > > >  tools/lib/bpf/libbpf.c | 14 ++++++++++++++
> >> > > > >  1 file changed, 14 insertions(+)
> >> > > > >
> >> > > > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> >> > > > > index d286dec73b5f..3e5e460fe63e 100644
> >> > > > > --- a/tools/lib/bpf/libbpf.c
> >> > > > > +++ b/tools/lib/bpf/libbpf.c
> >> > > > > @@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
> >> > > > >                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
> >> > > > >                         return err;
> >> > > > >                 }
> >> > > > > +               /* Make sure module BTF fd is never 0, as kernel depends on it
> >> > > > > +                * being > 0 to distinguish between vmlinux and module BTFs,
> >> > > > > +                * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
> >> > > > > +                */
> >> > > > > +               if (!fd) {
> >> > > > > +                       fd = dup(0);
> >> > > >
> >> > > > This is not the only place where we make assumptions that fd > 0 but
> >> > > > technically can get fd == 0. Instead of doing such a check in every
> >> > > > such place, would it be possible to open (cheaply) some FD (/dev/null
> >> > > > or whatever, don't know what's the best file to open), if we detect
> >> > > > that FD == 0 is not allocated? Can we detect that fd 0 is not
> >> > > > allocated?
> >> > > >
> >> > >
> >> > > We can, e.g. using access("/proc/self/fd/0", F_OK), but I think just calling
> >> > > open unconditonally and doing if (ret > 0) close(ret) is better. Also, do I
> >> >
> >> > yeah, I like this idea, let's go with it
> >>
> >> FYI some production environments may detect that FDs 0,1,2 are not
> >> pointing to stdin, stdout, stderr and will force close whatever files are there
> >> and open 0,1,2 with canonical files.
> >>
> >> libbpf doesn't have to resort to such measures, but it would be prudent to
> >> make libbpf operate on FDs > 2 for all bpf objects to make sure other
> >> frameworks don't ruin libbpf's view of FDs.
> >
> > oh well, even without those production complications this would be a
> > bit fragile, e.g., if the application temporarily opened FD 0 and then
> > closed it.
> >
> > Ok, Kumar, can you please do it as a simple helper that would
> > dup()'ing until we have FD>2, and use it in as few places as possible
> > to make sure that all FDs (not just module BTF) are covered. I'd
> > suggest doing that only in low-level helpers in btf.c, I think
> > libbpf's logic always goes through those anyways (but please
> > double-check that we don't call bpf syscall directly anywhere else).
>

Just to make sure I am on the same page:

I have to...
1. Add a small wrapper (currently named fd_gt_2, any other suggestions?)
   that takes in the fd, and dups it to fd >= 3 if in range [0, 2] (and closes
   original fd in this case).
   Use this for all fd returning bpf syscalls in bpf.c (btf.c is a typo?).
   Audit other places directly calling syscall(__NR_bpf, ...).
2. Assume that the situation Alexei mentioned only occurs at startup, or
   sometime later, not in parallel (which would race with us, and not sure
   we can deal with it). I'm thinking of a case where such an fd gets passed
   to an exec'd binary which closes invalid fds on startup (so keeping them
   >= 3 allows proper inheritance).
3. gen_loader can hit the same case, so short of adding a bpf_sys_fcntl (or the
   helper only exposing F_DUPFD), next best option is to reserve the three fds from
   skel_internal.h or gen_trace (in bpftool) and close later after loading is done.

Feedback needed on 3 (and whether a generic bpf_sys_dup providing functionality of
existing fcntl and dup{,2,3} is better than simply reserving the three fds at
load time).

> FYI, you can use fcntl() with F_DUPFD{,_CLOEXEC} and tell it the minimum
> fd number you're interested in for the clone. We do that in libxdp to
> protect against fd 0:
>

Thanks, will switch the dup to fcntl in the next version.

> https://github.com/xdp-project/xdp-tools/blob/master/lib/libxdp/libxdp.c#L1184
>
> Given Alexei's comments above, maybe we should be '3' for the last arg
> instead of 1...
>
> -Toke
>

--
Kartikeya

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 1/6] bpf: Add bpf_kallsyms_lookup_name helper
  2021-10-06  0:28 ` [PATCH bpf-next v1 1/6] bpf: Add bpf_kallsyms_lookup_name helper Kumar Kartikeya Dwivedi
@ 2021-10-07 20:23   ` Song Liu
  0 siblings, 0 replies; 27+ messages in thread
From: Song Liu @ 2021-10-07 20:23 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> This helper allows us to get the address of a kernel symbol from inside
> a BPF_PROG_TYPE_SYSCALL prog (used by gen_loader), so that we can
> relocate weak and typeless ksym vars.
>
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>

LGTM.

Acked-by: Song Liu <songliubraving@fb.com>

> ---
>  include/linux/bpf.h            |  1 +
>  include/uapi/linux/bpf.h       | 14 ++++++++++++++
>  kernel/bpf/syscall.c           | 24 ++++++++++++++++++++++++
>  tools/include/uapi/linux/bpf.h | 14 ++++++++++++++
>  4 files changed, 53 insertions(+)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index d604c8251d88..17206aae329d 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2107,6 +2107,7 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
>  extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
>  extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
>  extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
> +extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
>
>  const struct bpf_func_proto *tracing_prog_func_proto(
>    enum bpf_func_id func_id, const struct bpf_prog *prog);
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 6fc59d61937a..bbd0a3f4e5f6 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -4909,6 +4909,19 @@ union bpf_attr {
>   *     Return
>   *             The number of bytes written to the buffer, or a negative error
>   *             in case of failure.
> + *
> + * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
> + *     Description
> + *             Get the address of a kernel symbol, returned in *res*. *res* is
> + *             set to 0 if the symbol is not found.
> + *     Return
> + *             On success, zero. On error, a negative value.
> + *
> + *             **-EINVAL** if *flags* is not zero.
> + *
> + *             **-EINVAL** if string *name* is not the same size as *name_sz*.
> + *
> + *             **-ENOENT** if symbol is not found.
>   */
>  #define __BPF_FUNC_MAPPER(FN)          \
>         FN(unspec),                     \
> @@ -5089,6 +5102,7 @@ union bpf_attr {
>         FN(task_pt_regs),               \
>         FN(get_branch_snapshot),        \
>         FN(trace_vprintk),              \
> +       FN(kallsyms_lookup_name),       \
>         /* */
>
>  /* integer value in 'imm' field of BPF_CALL instruction selects which helper
> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index 4e50c0bfdb7d..073ca9ebe58b 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -4753,6 +4753,28 @@ static const struct bpf_func_proto bpf_sys_close_proto = {
>         .arg1_type      = ARG_ANYTHING,
>  };
>
> +BPF_CALL_4(bpf_kallsyms_lookup_name, const char *, name, int, name_sz, int, flags, u64 *, res)
> +{
> +       if (flags)
> +               return -EINVAL;
> +
> +       if (name_sz <= 1 || name[name_sz - 1])
> +               return -EINVAL;
> +
> +       *res = kallsyms_lookup_name(name);
> +       return *res ? 0 : -ENOENT;
> +}
> +
> +const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
> +       .func           = bpf_kallsyms_lookup_name,
> +       .gpl_only       = true,
> +       .ret_type       = RET_INTEGER,
> +       .arg1_type      = ARG_PTR_TO_MEM,
> +       .arg2_type      = ARG_CONST_SIZE,
> +       .arg3_type      = ARG_ANYTHING,
> +       .arg4_type      = ARG_PTR_TO_LONG,
> +};
> +
>  static const struct bpf_func_proto *
>  syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>  {
> @@ -4763,6 +4785,8 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
>                 return &bpf_btf_find_by_name_kind_proto;
>         case BPF_FUNC_sys_close:
>                 return &bpf_sys_close_proto;
> +       case BPF_FUNC_kallsyms_lookup_name:
> +               return &bpf_kallsyms_lookup_name_proto;
>         default:
>                 return tracing_prog_func_proto(func_id, prog);
>         }
> diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
> index 6fc59d61937a..bbd0a3f4e5f6 100644
> --- a/tools/include/uapi/linux/bpf.h
> +++ b/tools/include/uapi/linux/bpf.h
> @@ -4909,6 +4909,19 @@ union bpf_attr {
>   *     Return
>   *             The number of bytes written to the buffer, or a negative error
>   *             in case of failure.
> + *
> + * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
> + *     Description
> + *             Get the address of a kernel symbol, returned in *res*. *res* is
> + *             set to 0 if the symbol is not found.
> + *     Return
> + *             On success, zero. On error, a negative value.
> + *
> + *             **-EINVAL** if *flags* is not zero.
> + *
> + *             **-EINVAL** if string *name* is not the same size as *name_sz*.
> + *
> + *             **-ENOENT** if symbol is not found.
>   */
>  #define __BPF_FUNC_MAPPER(FN)          \
>         FN(unspec),                     \
> @@ -5089,6 +5102,7 @@ union bpf_attr {
>         FN(task_pt_regs),               \
>         FN(get_branch_snapshot),        \
>         FN(trace_vprintk),              \
> +       FN(kallsyms_lookup_name),       \
>         /* */
>
>  /* integer value in 'imm' field of BPF_CALL instruction selects which helper
> --
> 2.33.0
>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test
  2021-10-06  0:28 ` [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test Kumar Kartikeya Dwivedi
@ 2021-10-07 20:33   ` Song Liu
  2021-10-07 20:46     ` Kumar Kartikeya Dwivedi
  0 siblings, 1 reply; 27+ messages in thread
From: Song Liu @ 2021-10-07 20:33 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> Also, avoid using CO-RE features, as lskel doesn't support CO-RE, yet.
> Create a file for testing libbpf skeleton as well, so that both
> gen_loader and libbpf get tested.
>
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
[...]
> diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c
> new file mode 100644
> index 000000000000..b75725e28647
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c
> @@ -0,0 +1,31 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <test_progs.h>
> +#include "test_ksyms_weak.skel.h"
> +
> +void test_ksyms_weak_libbpf(void)

This is (almost?) the same as test_weak_syms(), right? Why do we need both?

> +{
> +       struct test_ksyms_weak *skel;
> +       struct test_ksyms_weak__data *data;
> +       int err;
> +
> +       skel = test_ksyms_weak__open_and_load();
> +       if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
> +               return;

[...]

> diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
> index 5f8379aadb29..521e7b99db08 100644
> --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
> +++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
> @@ -21,7 +21,6 @@ __u64 out__non_existent_typed = -1;
>  extern const struct rq runqueues __ksym __weak; /* typed */
>  extern const void bpf_prog_active __ksym __weak; /* typeless */
>
> -
>  /* non-existent weak symbols. */
>
>  /* typeless symbols, default to zero. */
> @@ -38,7 +37,7 @@ int pass_handler(const void *ctx)
>         /* tests existing symbols. */
>         rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
>         if (rq)
> -               out__existing_typed = rq->cpu;
> +               out__existing_typed = 0;

Why do we need this change?

>         out__existing_typeless = (__u64)&bpf_prog_active;
>
>         /* tests non-existent symbols. */
> --
> 2.33.0
>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test
  2021-10-07 20:33   ` Song Liu
@ 2021-10-07 20:46     ` Kumar Kartikeya Dwivedi
  2021-10-07 20:55       ` Kumar Kartikeya Dwivedi
  2021-10-07 20:57       ` Song Liu
  0 siblings, 2 replies; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-07 20:46 UTC (permalink / raw)
  To: Song Liu
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Fri, Oct 08, 2021 at 02:03:49AM IST, Song Liu wrote:
> On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> >
> > Also, avoid using CO-RE features, as lskel doesn't support CO-RE, yet.
> > Create a file for testing libbpf skeleton as well, so that both
> > gen_loader and libbpf get tested.
> >
> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> [...]
> > diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c
> > new file mode 100644
> > index 000000000000..b75725e28647
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c
> > @@ -0,0 +1,31 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +
> > +#include <test_progs.h>
> > +#include "test_ksyms_weak.skel.h"
> > +
> > +void test_ksyms_weak_libbpf(void)
>
> This is (almost?) the same as test_weak_syms(), right? Why do we need both?
>

One includes lskel.h (light skeleton), the other includes skel.h (libbpf
skeleton). Trying to include both in the same file, it ends up redefining the
same struct. I am not sure whether adding a prefix/suffix to light skeleton
struct names is possible now, maybe through another option to bpftool in
addition to name?

> > +{
> > +       struct test_ksyms_weak *skel;
> > +       struct test_ksyms_weak__data *data;
> > +       int err;
> > +
> > +       skel = test_ksyms_weak__open_and_load();
> > +       if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
> > +               return;
>
> [...]
>
> > diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
> > index 5f8379aadb29..521e7b99db08 100644
> > --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
> > +++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c
> > @@ -21,7 +21,6 @@ __u64 out__non_existent_typed = -1;
> >  extern const struct rq runqueues __ksym __weak; /* typed */
> >  extern const void bpf_prog_active __ksym __weak; /* typeless */
> >
> > -
> >  /* non-existent weak symbols. */
> >
> >  /* typeless symbols, default to zero. */
> > @@ -38,7 +37,7 @@ int pass_handler(const void *ctx)
> >         /* tests existing symbols. */
> >         rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
> >         if (rq)
> > -               out__existing_typed = rq->cpu;
> > +               out__existing_typed = 0;
>
> Why do we need this change?
>

Since they share the same BPF object for generating skeleton, it needs to remove
dependency on CO-RE which gen_loader does not support.

If it is kept, we get this:
...
libbpf: // TODO core_relo: prog 0 insn[5] rq kind 0
libbpf: prog 'pass_handler': relo #0: failed to relocate: -95
libbpf: failed to perform CO-RE relocations: -95
libbpf: failed to load object 'test_ksyms_weak'
...
> >         out__existing_typeless = (__u64)&bpf_prog_active;
> >
> >         /* tests non-existent symbols. */
> > --
> > 2.33.0
> >

--
Kartikeya

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test
  2021-10-07 20:46     ` Kumar Kartikeya Dwivedi
@ 2021-10-07 20:55       ` Kumar Kartikeya Dwivedi
  2021-10-07 20:57       ` Song Liu
  1 sibling, 0 replies; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-07 20:55 UTC (permalink / raw)
  To: Song Liu
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Fri, Oct 08, 2021 at 02:16:09AM IST, Kumar Kartikeya Dwivedi wrote:
> On Fri, Oct 08, 2021 at 02:03:49AM IST, Song Liu wrote:
> > On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> > >
> > > Also, avoid using CO-RE features, as lskel doesn't support CO-RE, yet.
> > > Create a file for testing libbpf skeleton as well, so that both
> > > gen_loader and libbpf get tested.
> > >
> > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > [...]
> > > diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c
> > > new file mode 100644
> > > index 000000000000..b75725e28647
> > > --- /dev/null
> > > +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_weak_libbpf.c
> > > @@ -0,0 +1,31 @@
> > > +// SPDX-License-Identifier: GPL-2.0
> > > +
> > > +#include <test_progs.h>
> > > +#include "test_ksyms_weak.skel.h"
> > > +
> > > +void test_ksyms_weak_libbpf(void)
> >
> > This is (almost?) the same as test_weak_syms(), right? Why do we need both?
> >
>
> One includes lskel.h (light skeleton), the other includes skel.h (libbpf
> skeleton). Trying to include both in the same file, it ends up redefining the
> same struct. I am not sure whether adding a prefix/suffix to light skeleton
> struct names is possible now, maybe through another option to bpftool in
> addition to name?

Sorry, I misremembered. The name option is enough, it is because of how I did it
in the Makefile (using LSKELS_EXTRA).  I'll fix this in the next spin.

> [...]

--
Kartikeya

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test
  2021-10-07 20:46     ` Kumar Kartikeya Dwivedi
  2021-10-07 20:55       ` Kumar Kartikeya Dwivedi
@ 2021-10-07 20:57       ` Song Liu
  1 sibling, 0 replies; 27+ messages in thread
From: Song Liu @ 2021-10-07 20:57 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Thu, Oct 7, 2021 at 1:46 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
[...]
> > >
> > >  /* typeless symbols, default to zero. */
> > > @@ -38,7 +37,7 @@ int pass_handler(const void *ctx)
> > >         /* tests existing symbols. */
> > >         rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
> > >         if (rq)
> > > -               out__existing_typed = rq->cpu;
> > > +               out__existing_typed = 0;
> >
> > Why do we need this change?
> >
>
> Since they share the same BPF object for generating skeleton, it needs to remove
> dependency on CO-RE which gen_loader does not support.
>
> If it is kept, we get this:
> ...
> libbpf: // TODO core_relo: prog 0 insn[5] rq kind 0
> libbpf: prog 'pass_handler': relo #0: failed to relocate: -95
> libbpf: failed to perform CO-RE relocations: -95
> libbpf: failed to load object 'test_ksyms_weak'

I see. Thanks for the explanation.

Song

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0
  2021-10-07 18:44               ` Kumar Kartikeya Dwivedi
@ 2021-10-07 21:29                 ` Andrii Nakryiko
  0 siblings, 0 replies; 27+ messages in thread
From: Andrii Nakryiko @ 2021-10-07 21:29 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: Toke Høiland-Jørgensen, Alexei Starovoitov, bpf,
	Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Networking

On Thu, Oct 7, 2021 at 11:44 AM Kumar Kartikeya Dwivedi
<memxor@gmail.com> wrote:
>
> On Thu, Oct 07, 2021 at 03:54:34PM IST, Toke Høiland-Jørgensen wrote:
> > Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:
> >
> > > On Wed, Oct 6, 2021 at 12:09 PM Alexei Starovoitov
> > > <alexei.starovoitov@gmail.com> wrote:
> > >>
> > >> On Wed, Oct 6, 2021 at 9:43 AM Andrii Nakryiko
> > >> <andrii.nakryiko@gmail.com> wrote:
> > >> >
> > >> > On Tue, Oct 5, 2021 at 10:24 PM Kumar Kartikeya Dwivedi
> > >> > <memxor@gmail.com> wrote:
> > >> > >
> > >> > > On Wed, Oct 06, 2021 at 10:11:29AM IST, Andrii Nakryiko wrote:
> > >> > > > On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> > >> > > > >
> > >> > > > > Since the code assumes in various places that BTF fd for modules is
> > >> > > > > never 0, if we end up getting fd as 0, obtain a new fd > 0. Even though
> > >> > > > > fd 0 being free for allocation is usually an application error, it is
> > >> > > > > still possible that we end up getting fd 0 if the application explicitly
> > >> > > > > closes its stdin. Deal with this by getting a new fd using dup and
> > >> > > > > closing fd 0.
> > >> > > > >
> > >> > > > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > >> > > > > ---
> > >> > > > >  tools/lib/bpf/libbpf.c | 14 ++++++++++++++
> > >> > > > >  1 file changed, 14 insertions(+)
> > >> > > > >
> > >> > > > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > >> > > > > index d286dec73b5f..3e5e460fe63e 100644
> > >> > > > > --- a/tools/lib/bpf/libbpf.c
> > >> > > > > +++ b/tools/lib/bpf/libbpf.c
> > >> > > > > @@ -4975,6 +4975,20 @@ static int load_module_btfs(struct bpf_object *obj)
> > >> > > > >                         pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
> > >> > > > >                         return err;
> > >> > > > >                 }
> > >> > > > > +               /* Make sure module BTF fd is never 0, as kernel depends on it
> > >> > > > > +                * being > 0 to distinguish between vmlinux and module BTFs,
> > >> > > > > +                * e.g. for BPF_PSEUDO_BTF_ID ld_imm64 insns (ksyms).
> > >> > > > > +                */
> > >> > > > > +               if (!fd) {
> > >> > > > > +                       fd = dup(0);
> > >> > > >
> > >> > > > This is not the only place where we make assumptions that fd > 0 but
> > >> > > > technically can get fd == 0. Instead of doing such a check in every
> > >> > > > such place, would it be possible to open (cheaply) some FD (/dev/null
> > >> > > > or whatever, don't know what's the best file to open), if we detect
> > >> > > > that FD == 0 is not allocated? Can we detect that fd 0 is not
> > >> > > > allocated?
> > >> > > >
> > >> > >
> > >> > > We can, e.g. using access("/proc/self/fd/0", F_OK), but I think just calling
> > >> > > open unconditionally and doing if (ret > 0) close(ret) is better. Also, do I
> > >> >
> > >> > yeah, I like this idea, let's go with it
> > >>
> > >> FYI some production environments may detect that FDs 0,1,2 are not
> > >> pointing to stdin, stdout, stderr and will force close whatever files are there
> > >> and open 0,1,2 with canonical files.
> > >>
> > >> libbpf doesn't have to resort to such measures, but it would be prudent to
> > >> make libbpf operate on FDs > 2 for all bpf objects to make sure other
> > >> frameworks don't ruin libbpf's view of FDs.
> > >
> > > oh well, even without those production complications this would be a
> > > bit fragile, e.g., if the application temporarily opened FD 0 and then
> > > closed it.
> > >
> > > Ok, Kumar, can you please do it as a simple helper that would
> > > dup()'ing until we have FD>2, and use it in as few places as possible
> > > to make sure that all FDs (not just module BTF) are covered. I'd
> > > suggest doing that only in low-level helpers in btf.c, I think
> > > libbpf's logic always goes through those anyways (but please
> > > double-check that we don't call bpf syscall directly anywhere else).
> >
>
> Just to make sure I am on the same page:
>
> I have to...
> 1. Add a small wrapper (currently named fd_gt_2, any other suggestions?)

ensure_good_fd() or something? 2 is a tiny detail there.

>    that takes in the fd, and dups it to fd >= 3 if in range [0, 2] (and closes
>    original fd in this case).
>    Use this for all fd returning bpf syscalls in bpf.c (btf.c is a typo?).

yep, typo, I meant bpf.c

>    Audit other places directly calling syscall(__NR_bpf, ...).

yep

> 2. Assume that the situation Alexei mentioned only occurs at startup, or
>    sometime later, not in parallel (which would race with us, and not sure
>    we can deal with it). I'm thinking of a case where such an fd gets passed
> >    to an exec'd binary which closes invalid fds on startup (so keeping them
>    >= 3 allows proper inheritance).

with checking it next to syscall(__NR_bpf) and fcntl suggestion from
Toke, why does it matter?

> 3. gen_loader can hit the same case, so short of adding a bpf_sys_fcntl (or the
>    helper only exposing F_DUPFD), next best option is to reserve the three fds from
>    skel_internal.h or gen_trace (in bpftool) and close later after loading is done.

Not sure, will defer to Alexei.

>
> Feedback needed on 3 (and whether a generic bpf_sys_dup providing functionality of
> existing fcntl and dup{,2,3} is better than simply reserving the three fds at
> load time).
>
> > FYI, you can use fcntl() with F_DUPFD{,_CLOEXEC} and tell it the minimum
> > fd number you're interested in for the clone. We do that in libxdp to
> > protect against fd 0:
> >
>
> Thanks, will switch the dup to fcntl in the next version.
>
> > https://github.com/xdp-project/xdp-tools/blob/master/lib/libxdp/libxdp.c#L1184
> >
> > Given Alexei's comments above, maybe we should use '3' for the last arg
> > instead of 1...
> >
> > -Toke
> >
>
> --
> Kartikeya

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 2/6] libbpf: Add typeless and weak ksym support to gen_loader
  2021-10-06  0:28 ` [PATCH bpf-next v1 2/6] libbpf: Add typeless and weak ksym support to gen_loader Kumar Kartikeya Dwivedi
@ 2021-10-07 21:45   ` Song Liu
  2021-10-07 22:01     ` Kumar Kartikeya Dwivedi
  0 siblings, 1 reply; 27+ messages in thread
From: Song Liu @ 2021-10-07 21:45 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> This patch adds typeless and weak ksym support to BTF_KIND_VAR
> relocation code in gen_loader. For typeless ksym, we use the newly added
> bpf_kallsyms_lookup_name helper.
>
> For weak ksym, we simply skip error check, and fix up the src_reg for
> the insn, as keeping it as BPF_PSEUDO_BTF_ID for weak ksym with its
> insn[0].imm and insn[1].imm set as 0 will cause a failure.  This is
> consistent with how libbpf relocates these two cases of BTF_KIND_VAR.
>
> We also modify cleanup_relos to check for typeless ksyms in fd closing
> loop, since those have no fd associated with the ksym. For this we can
> reuse the unused 'off' member of ksym_desc.
>
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
[...]

Everything above (trimmed) makes sense to me.

> +/* Expects:
> + * BPF_REG_8 - pointer to instruction
> + */
> +static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
> +{

But I don't quite follow why we need these changes to emit_relo_ksym_btf.
Maybe we should have these changes in a separate patch and add some
more explanations?

Thanks,
Song

[...]

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 5/6] bpf: selftests: Fix fd cleanup in sk_lookup test
  2021-10-06  6:49   ` Jakub Sitnicki
@ 2021-10-07 21:48     ` Song Liu
  0 siblings, 0 replies; 27+ messages in thread
From: Song Liu @ 2021-10-07 21:48 UTC (permalink / raw)
  To: Jakub Sitnicki
  Cc: Kumar Kartikeya Dwivedi, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, Jesper Dangaard Brouer,
	Toke Høiland-Jørgensen, Networking

On Tue, Oct 5, 2021 at 11:50 PM Jakub Sitnicki <jakub@cloudflare.com> wrote:
>
> On Wed, Oct 06, 2021 at 02:28 AM CEST, Kumar Kartikeya Dwivedi wrote:
> > Similar to the fix in commit:
> > e31eec77e4ab (bpf: selftests: Fix fd cleanup in get_branch_snapshot)
> >
> > We use memset to set fds to -1 without breaking on future changes to
> > the array size (when MAX_SERVER constant is bumped).
> >
> > The particular close(0) occurs on non-reuseport tests, so it can be seen
> > with -n 115/{2,3} but not 115/4. This can cause problems with future
> > tests if they depend on BTF fd never being acquired as fd 0, breaking
> > internal libbpf assumptions.
> >
> > Cc: Jakub Sitnicki <jakub@cloudflare.com>
> > Fixes: 0ab5539f8584 (selftests/bpf: Tests for BPF_SK_LOOKUP attach point)

Similar to Andrii's comment in 6/6, we need to add " to the Fixes tag.

> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>

Acked-by: Song Liu <songliubraving@fb.com>

>
> Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 6/6] bpf: selftests: Fix memory leak in test_ima
  2021-10-06  4:44   ` Andrii Nakryiko
@ 2021-10-07 21:48     ` Song Liu
  0 siblings, 0 replies; 27+ messages in thread
From: Song Liu @ 2021-10-07 21:48 UTC (permalink / raw)
  To: Andrii Nakryiko
  Cc: Kumar Kartikeya Dwivedi, bpf, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Martin KaFai Lau, Song Liu,
	Yonghong Song, Jesper Dangaard Brouer,
	Toke Høiland-Jørgensen, Networking

On Tue, Oct 5, 2021 at 9:46 PM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> >
> > The allocated ring buffer is never freed, do so in the cleanup path.
> >
> > Fixes: f446b570ac7e (bpf/selftests: Update the IMA test to use BPF ring buffer)
> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > ---
>
> Please stick to "selftests/bpf: " prefix which we use consistently for
> BPF selftests patches.
>
> Other than that LGTM.
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>

Acked-by: Song Liu <songliubraving@fb.com>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 2/6] libbpf: Add typeless and weak ksym support to gen_loader
  2021-10-07 21:45   ` Song Liu
@ 2021-10-07 22:01     ` Kumar Kartikeya Dwivedi
  2021-10-07 22:17       ` Song Liu
  0 siblings, 1 reply; 27+ messages in thread
From: Kumar Kartikeya Dwivedi @ 2021-10-07 22:01 UTC (permalink / raw)
  To: Song Liu
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Fri, Oct 08, 2021 at 03:15:10AM IST, Song Liu wrote:
> On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> >
> > This patch adds typeless and weak ksym support to BTF_KIND_VAR
> > relocation code in gen_loader. For typeless ksym, we use the newly added
> > bpf_kallsyms_lookup_name helper.
> >
> > > For weak ksym, we simply skip error check, and fix up the src_reg for
> > the insn, as keeping it as BPF_PSEUDO_BTF_ID for weak ksym with its
> > insn[0].imm and insn[1].imm set as 0 will cause a failure.  This is
> > consistent with how libbpf relocates these two cases of BTF_KIND_VAR.
> >
> > We also modify cleanup_relos to check for typeless ksyms in fd closing
> > loop, since those have no fd associated with the ksym. For this we can
> > reuse the unused 'off' member of ksym_desc.
> >
> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> [...]
>
> Everything above (trimmed) makes sense to me.
>
> > +/* Expects:
> > + * BPF_REG_8 - pointer to instruction
> > + */
> > +static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
> > +{
>
> But I don't quite follow why we need these changes to emit_relo_ksym_btf.
> Maybe we should have these changes in a separate patch and add some
> more explanations?
>

Before, if the bpf_btf_find_by_name_kind call failed, we just bailed out due to
the emit_check_err. Now, if it is weak, the error check is conditional, so
we set 0 as the default values and skip the store for btf_id and btf_fd if the
btf lookup failed. Till here, it is similar to the case for emit_relo_kfunc_btf.

Note that we only reach this path once for each unique symbol: the next time, we
enter the kdesc->ref > 1 branch, which copies from the existing insn.

Regarding src_reg stuff: in bpf_object__relocate_data, for obj->gen_loader,
ext->is_set is always true. For the normal libbpf case, it is only true if the
lookup succeeded for BTF (in bpf_object__resolve_ksym_var_btf_id). So depending
on if ext->is_set, it skips assigning BPF_PSEUDO_BTF_ID to src_reg and zeroes
out insn[0].imm and insn[1].imm. Also, the case for ext->is_set = false for
libbpf is only reached if we don't fail on lookup error, and that depends on
ext->is_weak. TLDR; ext->is_weak and lookup failure means src_reg is not
assigned.

For gen_loader, since this src_reg assignment is always there, we need to clear
it for the case where lookup failed, hence the:
-log:
+	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));

otherwise we end up with src_reg = BPF_PSEUDO_BTF_ID, imm[0] = 0, imm[1] = 0,
which ends up failing the load.

Similarly, we jump over the src_reg adjustment from the kdesc->ref > 1 case if
imm is not equal to 0 (if it were 0, then this is weak ksym). Error check
ensures this instruction is only reached if relo->is_weak (for the same symbol),
so we don't need to check it again there.

Doing it the other way around (not assigning BPF_PSEUDO_BTF_ID by default for
gen_loader) would still involve writing to it in the success case, so IMO
touching it seems unavoidable. If there are better ideas, please lmk.

I added the debug statements so that the selftest reloc result can be inspected
easily, but not sure I can/should verify it from the selftest itself.  I'll
split typeless and weak ksym support into separate patches next time, and
explain this in the commit message.

> Thanks,
> Song
>
> [...]

--
Kartikeya

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [PATCH bpf-next v1 2/6] libbpf: Add typeless and weak ksym support to gen_loader
  2021-10-07 22:01     ` Kumar Kartikeya Dwivedi
@ 2021-10-07 22:17       ` Song Liu
  0 siblings, 0 replies; 27+ messages in thread
From: Song Liu @ 2021-10-07 22:17 UTC (permalink / raw)
  To: Kumar Kartikeya Dwivedi
  Cc: bpf, Alexei Starovoitov, Daniel Borkmann, Andrii Nakryiko,
	Martin KaFai Lau, Song Liu, Yonghong Song,
	Jesper Dangaard Brouer, Toke Høiland-Jørgensen,
	Networking

On Thu, Oct 7, 2021 at 3:01 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> On Fri, Oct 08, 2021 at 03:15:10AM IST, Song Liu wrote:
> > On Tue, Oct 5, 2021 at 5:29 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> > >
> > > This patch adds typeless and weak ksym support to BTF_KIND_VAR
> > > relocation code in gen_loader. For typeless ksym, we use the newly added
> > > bpf_kallsyms_lookup_name helper.
> > >
> > > For weak ksym, we simply skip error check, and fix up the src_reg for
> > > the insn, as keeping it as BPF_PSEUDO_BTF_ID for weak ksym with its
> > > insn[0].imm and insn[1].imm set as 0 will cause a failure.  This is
> > > consistent with how libbpf relocates these two cases of BTF_KIND_VAR.
> > >
> > > We also modify cleanup_relos to check for typeless ksyms in fd closing
> > > loop, since those have no fd associated with the ksym. For this we can
> > > reuse the unused 'off' member of ksym_desc.
> > >
> > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > [...]
> >
> > Everything above (trimmed) makes sense to me.
> >
> > > +/* Expects:
> > > + * BPF_REG_8 - pointer to instruction
> > > + */
> > > +static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
> > > +{
> >
> > But I don't quite follow why we need these changes to emit_relo_ksym_btf.
> > Maybe we should have these changes in a separate patch and add some
> > more explanations?
> >
>
> Before, if the bpf_btf_find_by_name_kind call failed, we just bailed out due to
> the emit_check_err. Now, if it is weak, the error check is conditional, so
> we set 0 as the default values and skip the store for btf_id and btf_fd if the
> btf lookup failed. Till here, it is similar to the case for emit_relo_kfunc_btf.
>
> Note that we only reach this path once for each unique symbol: the next time, we
> enter the kdesc->ref > 1 branch, which copies from the existing insn.
>
> Regarding src_reg stuff: in bpf_object__relocate_data, for obj->gen_loader,
> ext->is_set is always true. For the normal libbpf case, it is only true if the
> lookup succeeded for BTF (in bpf_object__resolve_ksym_var_btf_id). So depending
> on if ext->is_set, it skips assigning BPF_PSEUDO_BTF_ID to src_reg and zeroes
> out insn[0].imm and insn[1].imm. Also, the case for ext->is_set = false for
> libbpf is only reached if we don't fail on lookup error, and that depends on
> ext->is_weak. TLDR; ext->is_weak and lookup failure means src_reg is not
> assigned.
>
> For gen_loader, since this src_reg assignment is always there, we need to clear
> it for the case where lookup failed, hence the:
> -log:
> +       emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
>
> otherwise we end up with src_reg = BPF_PSEUDO_BTF_ID, imm[0] = 0, imm[1] = 0,
> which ends up failing the load.
>
> Similarly, we jump over the src_reg adjustment from the kdesc->ref > 1 case if
> imm is not equal to 0 (if it were 0, then this is weak ksym). Error check
> ensures this instruction is only reached if relo->is_weak (for the same symbol),
> so we don't need to check it again there.
>
> Doing it the other way around (not assigning BPF_PSEUDO_BTF_ID by default for
> gen_loader) would still involve writing to it in the success case, so IMO
> touching it seems unavoidable. If there are better ideas, please lmk.
>
> I added the debug statements so that the selftest reloc result can be inspected
> easily, but not sure I can/should verify it from the selftest itself.  I'll
> split typeless and weak ksym support into separate patches next time, and
> explain this in the commit message.

Thanks for these explanations. It will be helpful to include them in
the commit log.

Song

^ permalink raw reply	[flat|nested] 27+ messages in thread

end of thread, other threads:[~2021-10-07 22:18 UTC | newest]

Thread overview: 27+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-10-06  0:28 [PATCH bpf-next v1 0/6] Typeless/weak ksym for gen_loader + misc fixups Kumar Kartikeya Dwivedi
2021-10-06  0:28 ` [PATCH bpf-next v1 1/6] bpf: Add bpf_kallsyms_lookup_name helper Kumar Kartikeya Dwivedi
2021-10-07 20:23   ` Song Liu
2021-10-06  0:28 ` [PATCH bpf-next v1 2/6] libbpf: Add typeless and weak ksym support to gen_loader Kumar Kartikeya Dwivedi
2021-10-07 21:45   ` Song Liu
2021-10-07 22:01     ` Kumar Kartikeya Dwivedi
2021-10-07 22:17       ` Song Liu
2021-10-06  0:28 ` [PATCH bpf-next v1 3/6] libbpf: Ensure that module BTF fd is never 0 Kumar Kartikeya Dwivedi
2021-10-06  4:41   ` Andrii Nakryiko
2021-10-06  5:24     ` Kumar Kartikeya Dwivedi
2021-10-06 16:43       ` Andrii Nakryiko
2021-10-06 19:09         ` Alexei Starovoitov
2021-10-06 19:38           ` Andrii Nakryiko
2021-10-07 10:24             ` Toke Høiland-Jørgensen
2021-10-07 18:44               ` Kumar Kartikeya Dwivedi
2021-10-07 21:29                 ` Andrii Nakryiko
2021-10-06  0:28 ` [PATCH bpf-next v1 4/6] bpf: selftests: Move test_ksyms_weak test to lskel, add libbpf test Kumar Kartikeya Dwivedi
2021-10-07 20:33   ` Song Liu
2021-10-07 20:46     ` Kumar Kartikeya Dwivedi
2021-10-07 20:55       ` Kumar Kartikeya Dwivedi
2021-10-07 20:57       ` Song Liu
2021-10-06  0:28 ` [PATCH bpf-next v1 5/6] bpf: selftests: Fix fd cleanup in sk_lookup test Kumar Kartikeya Dwivedi
2021-10-06  6:49   ` Jakub Sitnicki
2021-10-07 21:48     ` Song Liu
2021-10-06  0:28 ` [PATCH bpf-next v1 6/6] bpf: selftests: Fix memory leak in test_ima Kumar Kartikeya Dwivedi
2021-10-06  4:44   ` Andrii Nakryiko
2021-10-07 21:48     ` Song Liu

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).