From: Benjamin Tissoires <bentiss@kernel.org>
To: Alexei Starovoitov <ast@kernel.org>,
	 Daniel Borkmann <daniel@iogearbox.net>,
	 John Fastabend <john.fastabend@gmail.com>,
	 Andrii Nakryiko <andrii@kernel.org>,
	 Martin KaFai Lau <martin.lau@linux.dev>,
	 Eduard Zingerman <eddyz87@gmail.com>, Song Liu <song@kernel.org>,
	 Yonghong Song <yonghong.song@linux.dev>,
	KP Singh <kpsingh@kernel.org>,
	 Stanislav Fomichev <sdf@google.com>, Hao Luo <haoluo@google.com>,
	 Jiri Olsa <jolsa@kernel.org>, Jiri Kosina <jikos@kernel.org>,
	 Benjamin Tissoires <benjamin.tissoires@redhat.com>,
	 Jonathan Corbet <corbet@lwn.net>, Shuah Khan <shuah@kernel.org>
Cc: bpf@vger.kernel.org, linux-kernel@vger.kernel.org,
	 linux-input@vger.kernel.org, linux-doc@vger.kernel.org,
	 linux-kselftest@vger.kernel.org,
	Benjamin Tissoires <bentiss@kernel.org>
Subject: [PATCH RFC bpf-next v3 04/16] bpf/helpers: introduce sleepable bpf_timers
Date: Wed, 21 Feb 2024 17:25:20 +0100
Message-ID: <20240221-hid-bpf-sleepable-v3-4-1fb378ca6301@kernel.org>
In-Reply-To: <20240221-hid-bpf-sleepable-v3-0-1fb378ca6301@kernel.org>

They are implemented on top of a workqueue, which means that there are
no guarantees of timing or ordering.
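
Just for illustration (not part of the patch, and not meant to be applied): a
rough sketch of how a BPF program could arm such a timer. The map layout,
section and callback names are made up for the example, and it assumes headers
generated from a kernel carrying this patch so that BPF_F_TIMER_SLEEPABLE is
known; whether the verifier treats the callback as sleepable is addressed by
later patches in the series.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC 1
#endif

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timer_map SEC(".maps");

/* runs from a workqueue: no ordering or timing guarantees */
static int timer_cb(void *map, int *key, struct elem *value)
{
	return 0;
}

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(arm_sleepable_timer)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&timer_map, &key);

	if (!val)
		return 0;

	bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&val->t, timer_cb);
	/* nsecs is effectively ignored: the work is scheduled right away */
	bpf_timer_start(&val->t, 0, BPF_F_TIMER_SLEEPABLE);
	return 0;
}

char _license[] SEC("license") = "GPL";

Note that in this version bpf_timer_start() simply schedules the work when
BPF_F_TIMER_SLEEPABLE is set, so the nsecs argument is effectively ignored.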

Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>

---

changes in v3:
- extracted the implementation into bpf_timer only, without
  bpf_timer_set_sleepable_cb()
- rely on schedule_work() only, from bpf_timer_start()
- add a semaphore to ensure bpf_timer_work_cb() accesses
  consistent data

changes in v2 (compared to the version attached to v1 0/9):
- make use of a kfunc
- add an (unused) BPF_F_TIMER_SLEEPABLE flag
- the callback is *not* called, as calling it makes the kernel crash
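
One behavioural note, with a tiny sketch of what I believe a BPF program
should expect from this revision (purely illustrative; 'struct elem' embedding
the struct bpf_timer is made up, see the bpf_timer_cancel() hunk below):

static int stop_timer(struct elem *val)
{
	int ret = bpf_timer_cancel(&val->t);

	/* With BPF_F_TIMER_SLEEPABLE the workqueue callback may still be
	 * running at this point: bpf_timer_cancel() only does a non-blocking
	 * cancel_work(); the blocking cancel_work_sync() is left to
	 * bpf_timer_cancel_and_free() on element deletion / map teardown.
	 */
	return ret;
}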
---
 include/uapi/linux/bpf.h |  4 +++
 kernel/bpf/helpers.c     | 92 ++++++++++++++++++++++++++++++++++++++++--------
 2 files changed, 82 insertions(+), 14 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index d96708380e52..1fc7ecbd9d33 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -7421,10 +7421,14 @@ struct bpf_core_relo {
  *     - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
  *       relative to current time.
  *     - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
+ *     - BPF_F_TIMER_SLEEPABLE: Timer will run in a sleepable context, with
+ *       no guarantees of ordering or timing (consider the callback as being
+ *       offloaded to a workqueue immediately).
  */
 enum {
 	BPF_F_TIMER_ABS = (1ULL << 0),
 	BPF_F_TIMER_CPU_PIN = (1ULL << 1),
+	BPF_F_TIMER_SLEEPABLE = (1ULL << 2),
 };
 
 /* BPF numbers iterator state */
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 93edf730d288..f9add0abe40a 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -23,6 +23,7 @@
 #include <linux/btf_ids.h>
 #include <linux/bpf_mem_alloc.h>
 #include <linux/kasan.h>
+#include <linux/semaphore.h>
 
 #include "../../lib/kstrtox.h"
 
@@ -1094,13 +1095,19 @@ const struct bpf_func_proto bpf_snprintf_proto = {
  * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
  * Inner maps can contain bpf timers as well. ops->map_release_uref is
  * freeing the timers when inner map is replaced or deleted by user space.
+ *
+ * sleepable_lock protects only the setup of the workqueue, not the callback
+ * itself. This ensures the callback and the associated program cannot be
+ * freed concurrently with the workqueue setup.
  */
 struct bpf_hrtimer {
 	struct hrtimer timer;
+	struct work_struct work;
 	struct bpf_map *map;
 	struct bpf_prog *prog;
 	void __rcu *callback_fn;
 	void *value;
+	struct semaphore sleepable_lock;
 };
 
 /* the actual struct hidden inside uapi struct bpf_timer */
@@ -1113,6 +1120,55 @@ struct bpf_timer_kern {
 	struct bpf_spin_lock lock;
 } __attribute__((aligned(8)));
 
+static u32 __bpf_timer_compute_key(struct bpf_hrtimer *timer)
+{
+	struct bpf_map *map = timer->map;
+	void *value = timer->value;
+
+	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
+		struct bpf_array *array = container_of(map, struct bpf_array, map);
+
+		/* compute the key */
+		return ((char *)value - array->value) / array->elem_size;
+	}
+
+	/* hash or lru */
+	return *(u32 *)(value - round_up(map->key_size, 8));
+}
+
+static void bpf_timer_work_cb(struct work_struct *work)
+{
+	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, work);
+	struct bpf_map *map = t->map;
+	void *value = t->value;
+	bpf_callback_t callback_fn;
+	u32 key;
+
+	BTF_TYPE_EMIT(struct bpf_timer);
+
+	down(&t->sleepable_lock);
+
+	callback_fn = READ_ONCE(t->callback_fn);
+	if (!callback_fn) {
+		up(&t->sleepable_lock);
+		return;
+	}
+
+	key = __bpf_timer_compute_key(t);
+
+	/* Prevent the callback from being freed by bpf_timer_cancel() while it
+	 * is running, so that we can release the semaphore before invoking it.
+	 */
+	bpf_prog_inc(t->prog);
+
+	up(&t->sleepable_lock);
+
+	callback_fn((u64)(long)map, (u64)(long)&key, (u64)(long)value, 0, 0);
+	/* The verifier checked that return value is zero. */
+
+	bpf_prog_put(t->prog);
+}
+
 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
 
 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
@@ -1121,8 +1177,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
 	struct bpf_map *map = t->map;
 	void *value = t->value;
 	bpf_callback_t callback_fn;
-	void *key;
-	u32 idx;
+	u32 key;
 
 	BTF_TYPE_EMIT(struct bpf_timer);
 	callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
@@ -1136,17 +1191,9 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
 	 * bpf_map_delete_elem() on the same timer.
 	 */
 	this_cpu_write(hrtimer_running, t);
-	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
-		struct bpf_array *array = container_of(map, struct bpf_array, map);
-
-		/* compute the key */
-		idx = ((char *)value - array->value) / array->elem_size;
-		key = &idx;
-	} else { /* hash or lru */
-		key = value - round_up(map->key_size, 8);
-	}
+	key = __bpf_timer_compute_key(t);
 
-	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
+	callback_fn((u64)(long)map, (u64)(long)&key, (u64)(long)value, 0, 0);
 	/* The verifier checked that return value is zero. */
 
 	this_cpu_write(hrtimer_running, NULL);
@@ -1191,6 +1238,8 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
 	t->prog = NULL;
 	rcu_assign_pointer(t->callback_fn, NULL);
 	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+	INIT_WORK(&t->work, bpf_timer_work_cb);
+	sema_init(&t->sleepable_lock, 1);
 	t->timer.function = bpf_timer_cb;
 	WRITE_ONCE(timer->timer, t);
 	/* Guarantee the order between timer->timer and map->usercnt. So
@@ -1245,6 +1294,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb
 		ret = -EPERM;
 		goto out;
 	}
+	down(&t->sleepable_lock);
 	prev = t->prog;
 	if (prev != prog) {
 		/* Bump prog refcnt once. Every bpf_timer_set_callback()
@@ -1261,6 +1311,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb
 		t->prog = prog;
 	}
 	rcu_assign_pointer(t->callback_fn, callback_fn);
+	up(&t->sleepable_lock);
 out:
 	__bpf_spin_unlock_irqrestore(&timer->lock);
 	return ret;
@@ -1282,7 +1333,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla
 
 	if (in_nmi())
 		return -EOPNOTSUPP;
-	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
+	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN | BPF_F_TIMER_SLEEPABLE))
 		return -EINVAL;
 	__bpf_spin_lock_irqsave(&timer->lock);
 	t = timer->timer;
@@ -1299,7 +1350,10 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla
 	if (flags & BPF_F_TIMER_CPU_PIN)
 		mode |= HRTIMER_MODE_PINNED;
 
-	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
+	if (flags & BPF_F_TIMER_SLEEPABLE)
+		schedule_work(&t->work);
+	else
+		hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
 out:
 	__bpf_spin_unlock_irqrestore(&timer->lock);
 	return ret;
@@ -1346,13 +1400,21 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
 		ret = -EDEADLK;
 		goto out;
 	}
+	down(&t->sleepable_lock);
 	drop_prog_refcnt(t);
+	up(&t->sleepable_lock);
 out:
 	__bpf_spin_unlock_irqrestore(&timer->lock);
 	/* Cancel the timer and wait for associated callback to finish
 	 * if it was running.
 	 */
 	ret = ret ?: hrtimer_cancel(&t->timer);
+
+	/* Also cancel the sleepable work, but *do not* wait for it to
+	 * finish if it was running, as we might not be in a sleepable
+	 * context.
+	 */
+	ret = ret ?: cancel_work(&t->work);
 	return ret;
 }
 
@@ -1407,6 +1469,8 @@ void bpf_timer_cancel_and_free(void *val)
 	 */
 	if (this_cpu_read(hrtimer_running) != t)
 		hrtimer_cancel(&t->timer);
+
+	cancel_work_sync(&t->work);
 	kfree(t);
 }
 

-- 
2.43.0

