From: Kumar Kartikeya Dwivedi <memxor@gmail.com>
To: bpf@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>,
	Andrii Nakryiko <andrii@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Dave Marchevsky <davemarchevsky@fb.com>,
	Delyan Kratunov <delyank@fb.com>
Subject: [PATCH RFC bpf-next v1 19/32] bpf: Support bpf_list_head in local kptrs
Date: Sun,  4 Sep 2022 22:41:32 +0200
Message-ID: <20220904204145.3089-20-memxor@gmail.com>
In-Reply-To: <20220904204145.3089-1-memxor@gmail.com>

To support a map-in-map style use case, allow embedding a bpf_list_head
inside an allocated kptr representing a local type. Note that this is a
field that actually needs explicit action while destructing the object,
i.e. popping off all the nodes owned by the bpf_list_head and then
freeing each one of them. Hence, the destruction state needs to be
tracked while we are in that phase, and freeing must be rejected as long
as the embedded list_head has not been destructed.

For now, needs_destruction is false. A future patch will flip it to true
once adding items to such a list_head is supported.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 include/linux/btf.h                           |  8 ++
 kernel/bpf/btf.c                              | 89 ++++++++++++++++---
 kernel/bpf/helpers.c                          |  8 ++
 kernel/bpf/verifier.c                         |  4 +
 .../testing/selftests/bpf/bpf_experimental.h  |  9 ++
 5 files changed, 106 insertions(+), 12 deletions(-)

diff --git a/include/linux/btf.h b/include/linux/btf.h
index d99cad21e6d9..42c7f0283887 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -437,6 +437,8 @@ int btf_local_type_has_bpf_list_node(const struct btf *btf,
 				     const struct btf_type *t, u32 *offsetp);
 int btf_local_type_has_bpf_spin_lock(const struct btf *btf,
 				     const struct btf_type *t, u32 *offsetp);
+int btf_local_type_has_bpf_list_head(const struct btf *btf,
+				     const struct btf_type *t, u32 *offsetp);
 bool btf_local_type_has_special_fields(const struct btf *btf,
 				       const struct btf_type *t);
 #else
@@ -489,6 +491,12 @@ static inline int btf_local_type_has_bpf_spin_lock(const struct btf *btf,
 {
 	return -ENOENT;
 }
+static inline int btf_local_type_has_bpf_list_head(const struct btf *btf,
+					           const struct btf_type *t,
+					           u32 *offsetp)
+{
+	return -ENOENT;
+}
 static inline bool btf_local_type_has_special_fields(const struct btf *btf,
 						     const struct btf_type *t)
 {
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 63193c324898..c8d4513cc73e 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3185,7 +3185,8 @@ enum btf_field_type {
 	BTF_FIELD_SPIN_LOCK,
 	BTF_FIELD_TIMER,
 	BTF_FIELD_KPTR,
-	BTF_FIELD_LIST_HEAD,
+	BTF_FIELD_LIST_HEAD_MAP,
+	BTF_FIELD_LIST_HEAD_KPTR,
 	BTF_FIELD_LIST_NODE,
 };
 
@@ -3204,6 +3205,7 @@ struct btf_field_info {
 		struct {
 			u32 value_type_id;
 			const char *node_name;
+			enum btf_field_type type;
 		} list_head;
 	};
 };
@@ -3282,9 +3284,11 @@ static const char *btf_find_decl_tag_value(const struct btf *btf,
 	return NULL;
 }
 
-static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
-			      int comp_idx, const struct btf_type *t,
-			      u32 off, int sz, struct btf_field_info *info)
+static int btf_find_list_head(const struct btf *btf,
+			      enum btf_field_type field_type,
+			      const struct btf_type *pt, int comp_idx,
+			      const struct btf_type *t, u32 off, int sz,
+			      struct btf_field_info *info)
 {
 	const char *value_type;
 	const char *list_node;
@@ -3316,6 +3320,7 @@ static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
 	info->off = off;
 	info->list_head.value_type_id = id;
 	info->list_head.node_name = list_node;
+	info->list_head.type = field_type;
 	return BTF_FIELD_FOUND;
 }
 
@@ -3361,8 +3366,9 @@ static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t
 			if (ret < 0)
 				return ret;
 			break;
-		case BTF_FIELD_LIST_HEAD:
-			ret = btf_find_list_head(btf, t, i, member_type, off, sz,
+		case BTF_FIELD_LIST_HEAD_MAP:
+		case BTF_FIELD_LIST_HEAD_KPTR:
+			ret = btf_find_list_head(btf, field_type, t, i, member_type, off, sz,
 						 idx < info_cnt ? &info[idx] : &tmp);
 			if (ret < 0)
 				return ret;
@@ -3420,8 +3426,9 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
 			if (ret < 0)
 				return ret;
 			break;
-		case BTF_FIELD_LIST_HEAD:
-			ret = btf_find_list_head(btf, var, -1, var_type, off, sz,
+		case BTF_FIELD_LIST_HEAD_MAP:
+		case BTF_FIELD_LIST_HEAD_KPTR:
+			ret = btf_find_list_head(btf, field_type, var, -1, var_type, off, sz,
 						 idx < info_cnt ? &info[idx] : &tmp);
 			if (ret < 0)
 				return ret;
@@ -3462,7 +3469,8 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t,
 		sz = sizeof(u64);
 		align = 8;
 		break;
-	case BTF_FIELD_LIST_HEAD:
+	case BTF_FIELD_LIST_HEAD_MAP:
+	case BTF_FIELD_LIST_HEAD_KPTR:
 		name = "bpf_list_head";
 		sz = sizeof(struct bpf_list_head);
 		align = __alignof__(struct bpf_list_head);
@@ -3615,13 +3623,53 @@ struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf,
 	return ERR_PTR(ret);
 }
 
+static bool list_head_value_ok(const struct btf *btf, const struct btf_type *pt,
+			       const struct btf_type *vt,
+			       enum btf_field_type type)
+{
+	struct btf_field_info info;
+	int ret;
+
+	/* This is the value type of either map or kptr list_head. For map
+	 * list_head, we allow the value_type to have another bpf_list_head, but
+	 * for kptr list_head, we cannot allow another level of list_head.
+	 *
+	 * Also, in the map case, we must catch the case where the value_type's
+	 * list_head encodes the map_value as its own value_type.
+	 *
+	 * Essentially, we want only two levels for map, one level for kptr, and
+	 * no cycles at all in the type graph.
+	 */
+	WARN_ON_ONCE(btf_is_kernel(btf) || !__btf_type_is_struct(vt));
+	ret = btf_find_field(btf, vt, type, "kernel", &info, 1);
+	if (ret < 0)
+		return false;
+	/* For map or kptr, if value doesn't have list_head, it's ok! */
+	if (!ret)
+		return true;
+	if (ret) {
+		/* For kptr, we don't allow list_head in the value type. */
+		if (type == BTF_FIELD_LIST_HEAD_KPTR)
+			return false;
+		/* The map's list_head's value has another list head. We now
+		 * need to ensure it doesn't refer to map value type itself,
+		 * creating a cycle.
+		 */
+		vt = btf_type_by_id(btf, info.list_head.value_type_id);
+		if (vt == pt)
+			return false;
+	}
+	return true;
+}
+
 struct bpf_map_value_off *btf_parse_list_heads(struct btf *btf, const struct btf_type *t)
 {
 	struct btf_field_info info_arr[BPF_MAP_VALUE_OFF_MAX];
 	struct bpf_map_value_off *tab;
+	const struct btf_type *pt = t;
 	int ret, i, nr_off;
 
-	ret = btf_find_field(btf, t, BTF_FIELD_LIST_HEAD, NULL, info_arr, ARRAY_SIZE(info_arr));
+	ret = btf_find_field(btf, t, BTF_FIELD_LIST_HEAD_MAP, NULL, info_arr, ARRAY_SIZE(info_arr));
 	if (ret < 0)
 		return ERR_PTR(ret);
 	if (!ret)
@@ -3644,6 +3692,8 @@ struct bpf_map_value_off *btf_parse_list_heads(struct btf *btf, const struct btf
 		 * verify its type.
 		 */
 		ret = -EINVAL;
+		if (!list_head_value_ok(btf, pt, t, BTF_FIELD_LIST_HEAD_MAP))
+			goto end;
 		for_each_member(j, t, member) {
 			if (strcmp(info_arr[i].list_head.node_name, __btf_name_by_offset(btf, member->name_off)))
 				continue;
@@ -5937,12 +5987,19 @@ static int btf_find_local_type_field(const struct btf *btf,
 	int ret;
 
 	/* These are invariants that must hold if this is a local type */
-	WARN_ON_ONCE(btf_is_kernel(btf) || !__btf_type_is_struct(t));
+	WARN_ON_ONCE(btf_is_kernel(btf) || !__btf_type_is_struct(t) || type == BTF_FIELD_LIST_HEAD_MAP);
 	ret = btf_find_field(btf, t, type, "kernel", &info, 1);
 	if (ret < 0)
 		return ret;
 	if (!ret)
 		return 0;
+	/* A validation step needs to be done for bpf_list_head in local kptrs */
+	if (type == BTF_FIELD_LIST_HEAD_KPTR) {
+		const struct btf_type *vt = btf_type_by_id(btf, info.list_head.value_type_id);
+
+		if (!list_head_value_ok(btf, t, vt, type))
+			return -EINVAL;
+	}
 	if (offsetp)
 		*offsetp = info.off;
 	return ret;
@@ -5960,10 +6017,17 @@ int btf_local_type_has_bpf_spin_lock(const struct btf *btf,
 	return btf_find_local_type_field(btf, t, BTF_FIELD_SPIN_LOCK, offsetp);
 }
 
+int btf_local_type_has_bpf_list_head(const struct btf *btf,
+				     const struct btf_type *t, u32 *offsetp)
+{
+	return btf_find_local_type_field(btf, t, BTF_FIELD_LIST_HEAD_KPTR, offsetp);
+}
+
 bool btf_local_type_has_special_fields(const struct btf *btf, const struct btf_type *t)
 {
 	return btf_local_type_has_bpf_list_node(btf, t, NULL) == 1 ||
-	       btf_local_type_has_bpf_spin_lock(btf, t, NULL) == 1;
+	       btf_local_type_has_bpf_spin_lock(btf, t, NULL) == 1 ||
+	       btf_local_type_has_bpf_list_head(btf, t, NULL) == 1;
 }
 
 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
@@ -5993,6 +6057,7 @@ int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 	}
 		PREVENT_DIRECT_WRITE(bpf_list_node);
 		PREVENT_DIRECT_WRITE(bpf_spin_lock);
+		PREVENT_DIRECT_WRITE(bpf_list_head);
 
 #undef PREVENT_DIRECT_WRITE
 		err = 0;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 94a23a544aee..8eee0793c7f1 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1724,6 +1724,13 @@ void bpf_spin_lock_init(struct bpf_spin_lock *lock__clkptr)
 	memset(lock__clkptr, 0, sizeof(*lock__clkptr));
 }
 
+void bpf_list_head_init(struct bpf_list_head *head__clkptr)
+{
+	BUILD_BUG_ON(sizeof(struct bpf_list_head) != sizeof(struct list_head));
+	BUILD_BUG_ON(__alignof__(struct bpf_list_head) != __alignof__(struct list_head));
+	INIT_LIST_HEAD((struct list_head *)head__clkptr);
+}
+
 __diag_pop();
 
 BTF_SET8_START(tracing_btf_ids)
@@ -1733,6 +1740,7 @@ BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
 BTF_ID_FLAGS(func, bpf_kptr_alloc, KF_ACQUIRE | KF_RET_NULL | __KF_RET_DYN_BTF)
 BTF_ID_FLAGS(func, bpf_list_node_init)
 BTF_ID_FLAGS(func, bpf_spin_lock_init)
+BTF_ID_FLAGS(func, bpf_list_head_init)
 BTF_SET8_END(tracing_btf_ids)
 
 static const struct btf_kfunc_id_set tracing_kfunc_set = {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 130a4f0550f5..a5aa5de4b246 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7811,12 +7811,14 @@ BTF_ID_LIST(special_kfuncs)
 BTF_ID(func, bpf_kptr_alloc)
 BTF_ID(func, bpf_list_node_init)
 BTF_ID(func, bpf_spin_lock_init)
+BTF_ID(func, bpf_list_head_init)
 BTF_ID(struct, btf) /* empty entry */
 
 enum bpf_special_kfuncs {
 	KF_SPECIAL_bpf_kptr_alloc,
 	KF_SPECIAL_bpf_list_node_init,
 	KF_SPECIAL_bpf_spin_lock_init,
+	KF_SPECIAL_bpf_list_head_init,
 	KF_SPECIAL_bpf_empty,
 	KF_SPECIAL_MAX = KF_SPECIAL_bpf_empty,
 };
@@ -7984,6 +7986,7 @@ struct local_type_field {
 	enum {
 		FIELD_bpf_list_node,
 		FIELD_bpf_spin_lock,
+		FIELD_bpf_list_head,
 		FIELD_MAX,
 	} type;
 	enum bpf_special_kfuncs ctor_kfunc;
@@ -8030,6 +8033,7 @@ static int find_local_type_fields(const struct btf *btf, u32 btf_id, struct loca
 
 	FILL_LOCAL_TYPE_FIELD(bpf_list_node, bpf_list_node_init, bpf_empty, false);
 	FILL_LOCAL_TYPE_FIELD(bpf_spin_lock, bpf_spin_lock_init, bpf_empty, false);
+	FILL_LOCAL_TYPE_FIELD(bpf_list_head, bpf_list_head_init, bpf_empty, false);
 
 #undef FILL_LOCAL_TYPE_FIELD
 
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 8b1cdfb2f6bc..f0b6e92c6908 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -50,4 +50,13 @@ void bpf_list_node_init(struct bpf_list_node *node) __ksym;
  */
 void bpf_spin_lock_init(struct bpf_spin_lock *node) __ksym;
 
+/* Description
+ *	Initialize bpf_list_head field in a local kptr. This kfunc has
+ *	constructor semantics, and thus can only be called on a local kptr in
+ *	'constructing' phase.
+ * Returns
+ *	Void.
+ */
+void bpf_list_head_init(struct bpf_list_head *node) __ksym;
+
 #endif
-- 
2.34.1


