All of lore.kernel.org
 help / color / mirror / Atom feed
From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
	Andrii Nakryiko <andrii@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Dave Marchevsky <davemarchevsky@fb.com>,
	Delyan Kratunov <delyank@fb.com>
Subject: [PATCH RFC bpf-next v1 17/32] bpf: Support bpf_list_node in local kptrs
Date: Sun,  4 Sep 2022 22:41:30 +0200	[thread overview]
Message-ID: <20220904204145.3089-18-memxor@gmail.com> (raw)
In-Reply-To: <20220904204145.3089-1-memxor@gmail.com>

To allow a user to link their kptr allocated node into a linked list, we
must have a linked list node type that is recognized by the verifier as
fit for this purpose. Its name and offset will be matched with the
specification on the bpf_list_head it is being added to. This would
allow precise verification and type safety in BPF programs.

Since bpf_list_node does not correspond to a local type, but it is
embedded in a local type (i.e. a type present in program BTF, not kernel
BTF), we need to specially tag such a field so that the verifier knows
that it is a special kernel object whose invariants must hold during use
of the kptr allocation. For instance, reading and writing is allowed at
all other offsets in the kptr allocation, but access to this special
field would be rejected.

To do so, it needs to be tagged using a "kernel" BTF declaration tag,
like so:

struct item {
	int data;
	struct bpf_list_node node __kernel;
};

In future commits, more objects (such as kptrs inside kptrs, spin_lock,
even bpf_list_head) will be allowed in kptr allocations. But those need
more plumbing before they can all be made safe.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 include/linux/btf.h                           | 15 ++++
 kernel/bpf/btf.c                              | 86 ++++++++++++++++---
 kernel/bpf/helpers.c                          |  8 ++
 kernel/bpf/verifier.c                         | 46 ++++++++--
 .../testing/selftests/bpf/bpf_experimental.h  |  9 ++
 5 files changed, 146 insertions(+), 18 deletions(-)

diff --git a/include/linux/btf.h b/include/linux/btf.h
index fc35c932e89e..062bc45e1cc9 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -433,6 +433,10 @@ const struct btf_member *
 btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
 		      const struct btf_type *t, enum bpf_prog_type prog_type,
 		      int arg);
+int btf_local_type_has_bpf_list_node(const struct btf *btf,
+				     const struct btf_type *t, u32 *offsetp);
+bool btf_local_type_has_special_fields(const struct btf *btf,
+				       const struct btf_type *t);
 #else
 static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
 						    u32 type_id)
@@ -471,6 +475,17 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
 {
 	return NULL;
 }
+static inline int btf_local_type_has_bpf_list_node(const struct btf *btf,
+						   const struct btf_type *t,
+						   u32 *offsetp)
+{
+	return -ENOENT;
+}
+static inline bool btf_local_type_has_special_fields(const struct btf *btf,
+						     const struct btf_type *t)
+{
+	return false;
+}
 #endif
 
 #endif
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 17977e0f4e09..d8bc4752204c 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3186,6 +3186,7 @@ enum btf_field_type {
 	BTF_FIELD_TIMER,
 	BTF_FIELD_KPTR,
 	BTF_FIELD_LIST_HEAD,
+	BTF_FIELD_LIST_NODE,
 };
 
 enum {
@@ -3319,8 +3320,8 @@ static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
 }
 
 static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t,
-				 const char *name, int sz, int align,
-				 enum btf_field_type field_type,
+				 const char *name, const char *decl_tag, int sz,
+				 int align, enum btf_field_type field_type,
 				 struct btf_field_info *info, int info_cnt)
 {
 	const struct btf_member *member;
@@ -3334,6 +3335,8 @@ static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t
 
 		if (name && strcmp(__btf_name_by_offset(btf, member_type->name_off), name))
 			continue;
+		if (decl_tag && !btf_find_decl_tag_value(btf, t, i, decl_tag))
+			continue;
 
 		off = __btf_member_bit_offset(t, member);
 		if (off % 8)
@@ -3346,6 +3349,7 @@ static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t
 		switch (field_type) {
 		case BTF_FIELD_SPIN_LOCK:
 		case BTF_FIELD_TIMER:
+		case BTF_FIELD_LIST_NODE:
 			ret = btf_find_struct(btf, member_type, off, sz,
 					      idx < info_cnt ? &info[idx] : &tmp);
 			if (ret < 0)
@@ -3377,8 +3381,8 @@ static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t
 }
 
 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
-				const char *name, int sz, int align,
-				enum btf_field_type field_type,
+				const char *name, const char *decl_tag, int sz,
+				int align, enum btf_field_type field_type,
 				struct btf_field_info *info, int info_cnt)
 {
 	const struct btf_var_secinfo *vsi;
@@ -3394,6 +3398,8 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
 
 		if (name && strcmp(__btf_name_by_offset(btf, var_type->name_off), name))
 			continue;
+		if (decl_tag && !btf_find_decl_tag_value(btf, t, i, decl_tag))
+			continue;
 		if (vsi->size != sz)
 			continue;
 		if (off % align)
@@ -3402,6 +3408,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
 		switch (field_type) {
 		case BTF_FIELD_SPIN_LOCK:
 		case BTF_FIELD_TIMER:
+		case BTF_FIELD_LIST_NODE:
 			ret = btf_find_struct(btf, var_type, off, sz,
 					      idx < info_cnt ? &info[idx] : &tmp);
 			if (ret < 0)
@@ -3433,7 +3440,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
 }
 
 static int btf_find_field(const struct btf *btf, const struct btf_type *t,
-			  enum btf_field_type field_type,
+			  enum btf_field_type field_type, const char *decl_tag,
 			  struct btf_field_info *info, int info_cnt)
 {
 	const char *name;
@@ -3460,14 +3467,19 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t,
 		sz = sizeof(struct bpf_list_head);
 		align = __alignof__(struct bpf_list_head);
 		break;
+	case BTF_FIELD_LIST_NODE:
+		name = "bpf_list_node";
+		sz = sizeof(struct bpf_list_node);
+		align = __alignof__(struct bpf_list_node);
+		break;
 	default:
 		return -EFAULT;
 	}
 
 	if (__btf_type_is_struct(t))
-		return btf_find_struct_field(btf, t, name, sz, align, field_type, info, info_cnt);
+		return btf_find_struct_field(btf, t, name, decl_tag, sz, align, field_type, info, info_cnt);
 	else if (btf_type_is_datasec(t))
-		return btf_find_datasec_var(btf, t, name, sz, align, field_type, info, info_cnt);
+		return btf_find_datasec_var(btf, t, name, decl_tag, sz, align, field_type, info, info_cnt);
 	return -EINVAL;
 }
 
@@ -3480,7 +3492,7 @@ int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
 	struct btf_field_info info;
 	int ret;
 
-	ret = btf_find_field(btf, t, BTF_FIELD_SPIN_LOCK, &info, 1);
+	ret = btf_find_field(btf, t, BTF_FIELD_SPIN_LOCK, NULL, &info, 1);
 	if (ret < 0)
 		return ret;
 	if (!ret)
@@ -3493,7 +3505,7 @@ int btf_find_timer(const struct btf *btf, const struct btf_type *t)
 	struct btf_field_info info;
 	int ret;
 
-	ret = btf_find_field(btf, t, BTF_FIELD_TIMER, &info, 1);
+	ret = btf_find_field(btf, t, BTF_FIELD_TIMER, NULL, &info, 1);
 	if (ret < 0)
 		return ret;
 	if (!ret)
@@ -3510,7 +3522,7 @@ struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf,
 	struct module *mod = NULL;
 	int ret, i, nr_off;
 
-	ret = btf_find_field(btf, t, BTF_FIELD_KPTR, info_arr, ARRAY_SIZE(info_arr));
+	ret = btf_find_field(btf, t, BTF_FIELD_KPTR, NULL, info_arr, ARRAY_SIZE(info_arr));
 	if (ret < 0)
 		return ERR_PTR(ret);
 	if (!ret)
@@ -3609,7 +3621,7 @@ struct bpf_map_value_off *btf_parse_list_heads(struct btf *btf, const struct btf
 	struct bpf_map_value_off *tab;
 	int ret, i, nr_off;
 
-	ret = btf_find_field(btf, t, BTF_FIELD_LIST_HEAD, info_arr, ARRAY_SIZE(info_arr));
+	ret = btf_find_field(btf, t, BTF_FIELD_LIST_HEAD, NULL, info_arr, ARRAY_SIZE(info_arr));
 	if (ret < 0)
 		return ERR_PTR(ret);
 	if (!ret)
@@ -5916,6 +5928,37 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
 	return -EINVAL;
 }
 
+static int btf_find_local_type_field(const struct btf *btf,
+				     const struct btf_type *t,
+				     enum btf_field_type type,
+				     u32 *offsetp)
+{
+	struct btf_field_info info;
+	int ret;
+
+	/* These are invariants that must hold if this is a local type */
+	WARN_ON_ONCE(btf_is_kernel(btf) || !__btf_type_is_struct(t));
+	ret = btf_find_field(btf, t, type, "kernel", &info, 1);
+	if (ret < 0)
+		return ret;
+	if (!ret)
+		return 0;
+	if (offsetp)
+		*offsetp = info.off;
+	return ret;
+}
+
+int btf_local_type_has_bpf_list_node(const struct btf *btf,
+				     const struct btf_type *t, u32 *offsetp)
+{
+	return btf_find_local_type_field(btf, t, BTF_FIELD_LIST_NODE, offsetp);
+}
+
+bool btf_local_type_has_special_fields(const struct btf *btf, const struct btf_type *t)
+{
+	return btf_local_type_has_bpf_list_node(btf, t, NULL) == 1;
+}
+
 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 		      const struct btf_type *t, int off, int size,
 		      enum bpf_access_type atype __maybe_unused,
@@ -5926,6 +5969,27 @@ int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 	int err;
 	u32 id;
 
+	if (local_type) {
+		u32 offset;
+
+#define PREVENT_DIRECT_WRITE(field)							\
+	err = btf_local_type_has_##field(btf, t, &offset);				\
+	if (err < 0) {									\
+		bpf_log(log, "incorrect " #field " specification in local type\n");	\
+		return err;								\
+	}										\
+	if (err) {									\
+		if (off < offset + sizeof(struct field) && offset < off + size) {	\
+			bpf_log(log, "direct access to " #field " is disallowed\n");	\
+			return -EACCES;							\
+		}									\
+	}
+		PREVENT_DIRECT_WRITE(bpf_list_node);
+
+#undef PREVENT_DIRECT_WRITE
+		err = 0;
+	}
+
 	do {
 		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
 
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index d417aa4f0b22..0bb11d8bcaca 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1710,6 +1710,13 @@ void *bpf_kptr_alloc(u64 local_type_id__k, u64 flags)
 	return kmalloc(size, GFP_ATOMIC);
 }
 
+void bpf_list_node_init(struct bpf_list_node *node__clkptr)
+{
+	BUILD_BUG_ON(sizeof(struct bpf_list_node) != sizeof(struct list_head));
+	BUILD_BUG_ON(__alignof__(struct bpf_list_node) != __alignof__(struct list_head));
+	INIT_LIST_HEAD((struct list_head *)node__clkptr);
+}
+
 __diag_pop();
 
 BTF_SET8_START(tracing_btf_ids)
@@ -1717,6 +1724,7 @@ BTF_SET8_START(tracing_btf_ids)
 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
 #endif
 BTF_ID_FLAGS(func, bpf_kptr_alloc, KF_ACQUIRE | KF_RET_NULL | __KF_RET_DYN_BTF)
+BTF_ID_FLAGS(func, bpf_list_node_init)
 BTF_SET8_END(tracing_btf_ids)
 
 static const struct btf_kfunc_id_set tracing_kfunc_set = {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 64cceb7d2f20..1108b6200501 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7755,10 +7755,14 @@ static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
 
 BTF_ID_LIST(special_kfuncs)
 BTF_ID(func, bpf_kptr_alloc)
+BTF_ID(func, bpf_list_node_init)
+BTF_ID(struct, btf) /* empty entry */
 
 enum bpf_special_kfuncs {
 	KF_SPECIAL_bpf_kptr_alloc,
-	KF_SPECIAL_MAX,
+	KF_SPECIAL_bpf_list_node_init,
+	KF_SPECIAL_bpf_empty,
+	KF_SPECIAL_MAX = KF_SPECIAL_bpf_empty,
 };
 
 static bool __is_kfunc_special(const struct btf *btf, u32 func_id, unsigned int kf_sp)
@@ -7922,6 +7926,7 @@ static int process_kf_arg_ptr_to_kptr_strong(struct bpf_verifier_env *env,
 
 struct local_type_field {
 	enum {
+		FIELD_bpf_list_node,
 		FIELD_MAX,
 	} type;
 	enum bpf_special_kfuncs ctor_kfunc;
@@ -7944,9 +7949,34 @@ static int local_type_field_cmp(const void *a, const void *b)
 
 static int find_local_type_fields(const struct btf *btf, u32 btf_id, struct local_type_field *fields)
 {
-	/* XXX: Fill the fields when support is added */
-	sort(fields, FIELD_MAX, sizeof(fields[0]), local_type_field_cmp, NULL);
-	return FIELD_MAX;
+	const struct btf_type *t;
+	int cnt = 0, ret;
+	u32 offset;
+
+	t = btf_type_by_id(btf, btf_id);
+	if (!t)
+		return -ENOENT;
+
+#define FILL_LOCAL_TYPE_FIELD(ftype, ctor, dtor, nd)        \
+	ret = btf_local_type_has_##ftype(btf, t, &offset);  \
+	if (ret < 0)                                        \
+		return ret;                                 \
+	if (ret) {                                          \
+		fields[cnt].type = FIELD_##ftype;           \
+		fields[cnt].ctor_kfunc = KF_SPECIAL_##ctor; \
+		fields[cnt].dtor_kfunc = KF_SPECIAL_##dtor; \
+		fields[cnt].name = #ftype;                  \
+		fields[cnt].offset = offset;                \
+		fields[cnt].needs_destruction = nd;         \
+		cnt++;                                      \
+	}
+
+	FILL_LOCAL_TYPE_FIELD(bpf_list_node, bpf_list_node_init, bpf_empty, false);
+
+#undef FILL_LOCAL_TYPE_FIELD
+
+	sort(fields, cnt, sizeof(fields[0]), local_type_field_cmp, NULL);
+	return cnt;
 }
 
 static int
@@ -8439,10 +8469,12 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			 * setting of this flag.
 			 */
 			regs[BPF_REG_0].type |= MEM_TYPE_LOCAL;
-			/* TODO: Recognize special fields in local type aand
-			 * force their construction before pointer escapes by
-			 * setting OBJ_CONSTRUCTING.
+			/* Recognize special fields in local type and force
+			 * their construction before pointer escapes by setting
+			 * OBJ_CONSTRUCTING.
 			 */
+			if (btf_local_type_has_special_fields(ret_btf, ret_t))
+				regs[BPF_REG_0].type |= OBJ_CONSTRUCTING;
 		} else {
 			if (!btf_type_is_struct(ptr_type)) {
 				ptr_type_name = btf_name_by_offset(desc_btf, ptr_type->name_off);
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index bddd77093d1e..c3c5442742dc 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -32,4 +32,13 @@ struct bpf_list_node {
  */
 void *bpf_kptr_alloc(__u64 local_type_id, __u64 flags) __ksym;
 
+/* Description
+ *	Initialize bpf_list_node field in a local kptr. This kfunc has
+ *	constructor semantics, and thus can only be called on a local kptr in
+ *	'constructing' phase.
+ * Returns
+ *	Void.
+ */
+void bpf_list_node_init(struct bpf_list_node *node) __ksym;
+
 #endif
-- 
2.34.1


  parent reply	other threads:[~2022-09-04 20:42 UTC|newest]

Thread overview: 82+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-04 20:41 [PATCH RFC bpf-next v1 00/32] Local kptrs, BPF linked lists Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 01/32] bpf: Add copy_map_value_long to copy to remote percpu memory Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 02/32] bpf: Support kptrs in percpu arraymap Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 03/32] bpf: Add zero_map_value to zero map value with special fields Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 04/32] bpf: Support kptrs in percpu hashmap and percpu LRU hashmap Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 05/32] bpf: Support kptrs in local storage maps Kumar Kartikeya Dwivedi
2022-09-07 19:00   ` Alexei Starovoitov
2022-09-08  2:47     ` Kumar Kartikeya Dwivedi
2022-09-09  5:27   ` Martin KaFai Lau
2022-09-09 11:22     ` Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 06/32] bpf: Annotate data races in bpf_local_storage Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 07/32] bpf: Allow specifying volatile type modifier for kptrs Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 08/32] bpf: Add comment about kptr's PTR_TO_MAP_VALUE handling Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 09/32] bpf: Rewrite kfunc argument handling Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 10/32] bpf: Drop kfunc support from btf_check_func_arg_match Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 11/32] bpf: Support constant scalar arguments for kfuncs Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 12/32] bpf: Teach verifier about non-size constant arguments Kumar Kartikeya Dwivedi
2022-09-07 22:11   ` Alexei Starovoitov
2022-09-08  2:49     ` Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 13/32] bpf: Introduce bpf_list_head support for BPF maps Kumar Kartikeya Dwivedi
2022-09-07 22:46   ` Alexei Starovoitov
2022-09-08  2:58     ` Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 14/32] bpf: Introduce bpf_kptr_alloc helper Kumar Kartikeya Dwivedi
2022-09-07 23:30   ` Alexei Starovoitov
2022-09-08  3:01     ` Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 15/32] bpf: Add helper macro bpf_expr_for_each_reg_in_vstate Kumar Kartikeya Dwivedi
2022-09-07 23:48   ` Alexei Starovoitov
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 16/32] bpf: Introduce BPF memory object model Kumar Kartikeya Dwivedi
2022-09-08  0:34   ` Alexei Starovoitov
2022-09-08  2:39     ` Kumar Kartikeya Dwivedi
2022-09-08  3:37       ` Alexei Starovoitov
2022-09-08 11:50         ` Kumar Kartikeya Dwivedi
2022-09-08 14:18           ` Alexei Starovoitov
2022-09-08 14:45             ` Kumar Kartikeya Dwivedi
2022-09-08 15:11               ` Alexei Starovoitov
2022-09-08 15:37                 ` Kumar Kartikeya Dwivedi
2022-09-08 15:59                   ` Alexei Starovoitov
2022-09-04 20:41 ` Kumar Kartikeya Dwivedi [this message]
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 18/32] bpf: Support bpf_spin_lock in local kptrs Kumar Kartikeya Dwivedi
2022-09-08  0:35   ` Alexei Starovoitov
2022-09-09  8:25     ` Dave Marchevsky
2022-09-09 11:20       ` Kumar Kartikeya Dwivedi
2022-09-09 14:26         ` Alexei Starovoitov
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 19/32] bpf: Support bpf_list_head " Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 20/32] bpf: Introduce bpf_kptr_free helper Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 21/32] bpf: Allow locking bpf_spin_lock global variables Kumar Kartikeya Dwivedi
2022-09-08  0:27   ` Alexei Starovoitov
2022-09-08  0:39     ` Kumar Kartikeya Dwivedi
2022-09-08  0:55       ` Alexei Starovoitov
2022-09-08  1:00     ` Kumar Kartikeya Dwivedi
2022-09-08  1:08       ` Alexei Starovoitov
2022-09-08  1:15         ` Kumar Kartikeya Dwivedi
2022-09-08  2:39           ` Alexei Starovoitov
2022-09-09  8:13   ` Dave Marchevsky
2022-09-09 11:05     ` Kumar Kartikeya Dwivedi
2022-09-09 14:24       ` Alexei Starovoitov
2022-09-09 14:50         ` Kumar Kartikeya Dwivedi
2022-09-09 14:58           ` Alexei Starovoitov
2022-09-09 18:32             ` Andrii Nakryiko
2022-09-09 19:25               ` Alexei Starovoitov
2022-09-09 20:21                 ` Andrii Nakryiko
2022-09-09 20:57                   ` Alexei Starovoitov
2022-09-10  0:21                     ` Andrii Nakryiko
2022-09-11 22:31                       ` Alexei Starovoitov
2022-09-20 20:55                         ` Andrii Nakryiko
2022-10-18  4:06                           ` Andrii Nakryiko
2022-09-09 22:30                 ` Dave Marchevsky
2022-09-09 22:49                   ` Kumar Kartikeya Dwivedi
2022-09-09 22:57                     ` Alexei Starovoitov
2022-09-09 23:04                       ` Kumar Kartikeya Dwivedi
2022-09-09 22:51                   ` Alexei Starovoitov
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 22/32] bpf: Bump BTF_KFUNC_SET_MAX_CNT Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 23/32] bpf: Add single ownership BPF linked list API Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 24/32] bpf: Permit NULL checking pointer with non-zero fixed offset Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 25/32] bpf: Allow storing local kptrs in BPF maps Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 26/32] bpf: Wire up freeing of bpf_list_heads in maps Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 27/32] bpf: Add destructor for bpf_list_head in local kptr Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 28/32] bpf: Remove duplicate PTR_TO_BTF_ID RO check Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 29/32] libbpf: Add support for private BSS map section Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 30/32] selftests/bpf: Add BTF tag macros for local kptrs, BPF linked lists Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 31/32] selftests/bpf: Add BPF linked list API tests Kumar Kartikeya Dwivedi
2022-09-04 20:41 ` [PATCH RFC bpf-next v1 32/32] selftests/bpf: Add referenced local kptr tests Kumar Kartikeya Dwivedi

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220904204145.3089-18-memxor@gmail.com \
    --to=memxor@gmail.com \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=davemarchevsky@fb.com \
    --cc=delyank@fb.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.