* [RFC PATCH] bpf: Add new bpf map type for timer
@ 2015-10-19  5:34 He Kuang
  2015-10-19  6:30 ` kbuild test robot
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: He Kuang @ 2015-10-19  5:34 UTC (permalink / raw)
  To: ast, davem, daniel, mingo, rostedt, xiakaixu
  Cc: wangnan0, hekuang, linux-kernel

This patch implements a timer map type inherited from the array map. eBPF
programs can deploy a timer by updating an entry in the timer map, and
destroy it by deleting the entry. The timer delay (in ns) is set by
updating the value field of the entry.

Currently, an intentionally empty function is called when the timer is
triggered; eBPF programs can then be attached to this function to
perform customized actions.

Signed-off-by: He Kuang <hekuang@huawei.com>
---

Here's a hypothetical scenario to illustrate the use of timer map.

A video frame is updated between frame_refresh_start() and
frame_refresh_end(). In most cases, the interval between these two
functions is less than 40ms, but it occasionally exceeds 200ms.

We can set a timer that fires 42ms (slightly larger than 40ms) after
frame_refresh_start() has executed, and destroy this timer when
frame_refresh_end() is called. So, in most cases, the timer is not
triggered, which significantly reduces the amount of trace data.

eBPF progs:

  struct bpf_map_def SEC("maps") timer_map = {
  	.type = BPF_MAP_TYPE_TIMER_ARRAY,
  	.key_size = sizeof(int),
  	.value_size = sizeof(unsigned long long),
  	.max_entries = 4,
  };

  SEC("func1=frame_refresh_start")
  int bpf_prog1(struct pt_regs *ctx)
  {
  	int delay_ns = 42 * NSEC_PER_MSEC; /* 42 milliseconds */
  	unsigned int index = 0;

  	bpf_map_update_elem(&timer_map, &index, &delay_ns, BPF_ANY);
  	return 0;
  }

  SEC("func2=frame_refresh_end")
  int bpf_prog2(struct pt_regs *ctx)
  {
  	unsigned int index = 0;

  	bpf_map_delete_elem(&timer_map, &index);
  	return 0;
  }

  SEC("func3=bpf_timer_callback")
  int bpf_prog3(struct pt_regs *ctx, int result)
  {
  	char fmt[] = "timer triggered\n";

  	bpf_trace_printk(fmt, sizeof(fmt));

  	/* If we get here, the frame refresh time is beyond the
  	   threshold; we can switch on more tracepoints or enable more
  	   detailed trace parameters to diagnose the problem.
  	 */
  	return 0;
  }

Then we can write an executable to test the eBPF progs above; each
frame update that exceeds the timeout will trigger the timer.
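
A minimal user-space exerciser for this scenario might look like the
sketch below. This is purely hypothetical and not part of the patch:
the frame_refresh_start()/frame_refresh_end() symbols, the iteration
count and the sleep pattern are made up only to give the func1/func2
probes something to attach to.

  /* test_frame.c - hypothetical exerciser; every 20th "frame" overruns
   * the 42ms timer, so bpf_prog3 should fire for those iterations only.
   */
  #include <unistd.h>

  /* noinline plus an asm barrier keep the two probe targets from being
   * optimized away, so real call sites remain for the probes to hit.
   */
  __attribute__((noinline)) void frame_refresh_start(void) { asm volatile(""); }
  __attribute__((noinline)) void frame_refresh_end(void) { asm volatile(""); }

  int main(void)
  {
  	int i;

  	for (i = 0; i < 1000; i++) {
  		frame_refresh_start();
  		/* normally ~10ms per frame, occasionally ~200ms */
  		usleep(i % 20 ? 10 * 1000 : 200 * 1000);
  		frame_refresh_end();
  	}
  	return 0;
  }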

---
 include/uapi/linux/bpf.h |   1 +
 kernel/bpf/arraymap.c    | 161 +++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 149 insertions(+), 13 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 92a48e2..c41c80e 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -115,6 +115,7 @@ enum bpf_map_type {
 	BPF_MAP_TYPE_ARRAY,
 	BPF_MAP_TYPE_PROG_ARRAY,
 	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	BPF_MAP_TYPE_TIMER_ARRAY,
 };
 
 enum bpf_prog_type {
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 29ace10..1e03c70 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -16,18 +16,10 @@
 #include <linux/mm.h>
 #include <linux/filter.h>
 
-/* Called from syscall */
-static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+static struct bpf_map *__array_map_alloc(union bpf_attr *attr, u32 elem_size)
 {
 	struct bpf_array *array;
-	u32 elem_size, array_size;
-
-	/* check sanity of attributes */
-	if (attr->max_entries == 0 || attr->key_size != 4 ||
-	    attr->value_size == 0)
-		return ERR_PTR(-EINVAL);
-
-	elem_size = round_up(attr->value_size, 8);
+	u32 array_size;
 
 	/* check round_up into zero and u32 overflow */
 	if (elem_size == 0 ||
@@ -54,6 +46,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	return &array->map;
 }
 
+/* Called from syscall */
+static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+{
+	u32 elem_size;
+
+	/* check sanity of attributes */
+	if (attr->max_entries == 0 || attr->key_size != 4 ||
+	    attr->value_size == 0)
+		return ERR_PTR(-EINVAL);
+
+	elem_size = round_up(attr->value_size, 8);
+	return __array_map_alloc(attr, elem_size);
+}
+
 /* Called from syscall or from eBPF program */
 static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 {
@@ -171,7 +177,7 @@ static void fd_array_map_free(struct bpf_map *map)
 	kvfree(array);
 }
 
-static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
+static void *empty_array_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	return NULL;
 }
@@ -255,7 +261,7 @@ static const struct bpf_map_ops prog_array_ops = {
 	.map_alloc = fd_array_map_alloc,
 	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
-	.map_lookup_elem = fd_array_map_lookup_elem,
+	.map_lookup_elem = empty_array_map_lookup_elem,
 	.map_update_elem = fd_array_map_update_elem,
 	.map_delete_elem = fd_array_map_delete_elem,
 	.map_fd_get_ptr = prog_fd_array_get_ptr,
@@ -312,7 +318,7 @@ static const struct bpf_map_ops perf_event_array_ops = {
 	.map_alloc = fd_array_map_alloc,
 	.map_free = perf_event_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
-	.map_lookup_elem = fd_array_map_lookup_elem,
+	.map_lookup_elem = empty_array_map_lookup_elem,
 	.map_update_elem = fd_array_map_update_elem,
 	.map_delete_elem = fd_array_map_delete_elem,
 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
@@ -330,3 +336,132 @@ static int __init register_perf_event_array_map(void)
 	return 0;
 }
 late_initcall(register_perf_event_array_map);
+
+/* Intended empty function as ebpf stub */
+enum hrtimer_restart bpf_timer_callback(struct hrtimer *timer __maybe_unused)
+{
+	return HRTIMER_NORESTART;
+}
+
+static void timer_array_map_init_timers(struct bpf_map *map)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	int i;
+	struct hrtimer *timer;
+
+	/* init all timer */
+	for (i = 0; i < array->map.max_entries; i++) {
+		timer = (struct hrtimer *)(array->value +
+					   array->elem_size * i);
+		hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		timer->function = bpf_timer_callback;
+	}
+}
+
+static struct bpf_map *timer_array_map_alloc(union bpf_attr *attr)
+{
+	u32 elem_size;
+	struct bpf_map *map;
+
+	if (attr->value_size != sizeof(u64))
+		return ERR_PTR(-EINVAL);
+
+	/* check sanity of attributes */
+	if (attr->max_entries == 0 || attr->key_size != 4 ||
+	    attr->value_size == 0)
+		return ERR_PTR(-EINVAL);
+
+	elem_size = round_up(sizeof(struct hrtimer), 8);
+	map = __array_map_alloc(attr, elem_size);
+	if (IS_ERR(map))
+		return map;
+
+	timer_array_map_init_timers(map);
+
+	return map;
+}
+
+static void timer_array_map_free(struct bpf_map *map)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	int i;
+	struct hrtimer *timer;
+
+	synchronize_rcu();
+
+	/* cancel all untriggered timer */
+	for (i = 0; i < array->map.max_entries; i++) {
+		timer = (struct hrtimer *)(array->value +
+					   array->elem_size * i);
+		hrtimer_cancel(timer);
+	}
+	kvfree(array);
+}
+
+/* Timer activate */
+static int timer_array_map_update_elem(struct bpf_map *map, void *key,
+				       void *value, u64 map_flags)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	u32 index = *(u32 *)key;
+	struct hrtimer *timer;
+	int delay_ns = *(u64 *)value;
+	ktime_t kt;
+
+	if (map_flags > BPF_EXIST)
+		/* unknown flags */
+		return -EINVAL;
+
+	if (index >= array->map.max_entries)
+		/* all elements were pre-allocated, cannot insert a new one */
+		return -E2BIG;
+
+	if (map_flags == BPF_NOEXIST)
+		/* all elements already exist */
+		return -EEXIST;
+
+	timer = (struct hrtimer *)(array->value + array->elem_size * index);
+	kt = ktime_set(0, delay_ns);
+	hrtimer_start(timer, kt, HRTIMER_MODE_REL);
+
+	return 0;
+}
+
+/* Timer inactivate */
+static int timer_array_map_delete_elem(struct bpf_map *map, void *key)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	u32 index = *(u32 *)key;
+	struct hrtimer *timer;
+
+	if (index >= array->map.max_entries)
+		return -E2BIG;
+
+	timer = (struct hrtimer *)(array->value + array->elem_size * index);
+
+	if (hrtimer_try_to_cancel(timer) >= 0)
+		return 0;
+	else
+		return -EBUSY;
+}
+
+static const struct bpf_map_ops timer_array_ops = {
+	.map_alloc = timer_array_map_alloc,
+	.map_free = timer_array_map_free,
+	.map_get_next_key = array_map_get_next_key,
+	.map_lookup_elem = empty_array_map_lookup_elem,
+	.map_update_elem = timer_array_map_update_elem,
+	.map_delete_elem = timer_array_map_delete_elem,
+};
+
+static struct bpf_map_type_list timer_array_type __read_mostly = {
+	.ops = &timer_array_ops,
+	.type = BPF_MAP_TYPE_TIMER_ARRAY,
+};
+
+static int __init register_timer_array_map(void)
+{
+	bpf_register_map_type(&timer_array_type);
+	return 0;
+}
+late_initcall(register_timer_array_map);
-- 
1.8.5.2



* Re: [RFC PATCH] bpf: Add new bpf map type for timer
  2015-10-19  5:34 [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
@ 2015-10-19  6:30 ` kbuild test robot
  2015-10-19  6:30 ` [RFC PATCH] bpf: bpf_timer_callback() can be static kbuild test robot
  2015-10-21 10:02 ` [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
  2 siblings, 0 replies; 8+ messages in thread
From: kbuild test robot @ 2015-10-19  6:30 UTC (permalink / raw)
  To: He Kuang
  Cc: kbuild-all, ast, davem, daniel, mingo, rostedt, xiakaixu,
	wangnan0, hekuang, linux-kernel

Hi He,

[auto build test WARNING on v4.3-rc6 -- if it's inappropriate base, please suggest rules for selecting the more suitable base]

url:    https://github.com/0day-ci/linux/commits/He-Kuang/bpf-Add-new-bpf-map-type-for-timer/20151019-133809
reproduce:
        # apt-get install sparse
        make ARCH=x86_64 allmodconfig
        make C=1 CF=-D__CHECK_ENDIAN__


sparse warnings: (new ones prefixed by >>)

>> kernel/bpf/arraymap.c:341:22: sparse: symbol 'bpf_timer_callback' was not declared. Should it be static?

Please review and possibly fold the followup patch.

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation


* [RFC PATCH] bpf: bpf_timer_callback() can be static
  2015-10-19  5:34 [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
  2015-10-19  6:30 ` kbuild test robot
@ 2015-10-19  6:30 ` kbuild test robot
  2015-10-21 10:02 ` [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
  2 siblings, 0 replies; 8+ messages in thread
From: kbuild test robot @ 2015-10-19  6:30 UTC (permalink / raw)
  To: He Kuang
  Cc: kbuild-all, ast, davem, daniel, mingo, rostedt, xiakaixu,
	wangnan0, hekuang, linux-kernel


Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
---
 arraymap.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 1e03c70..dbdad0c 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -338,7 +338,7 @@ static int __init register_perf_event_array_map(void)
 late_initcall(register_perf_event_array_map);
 
 /* Intended empty function as ebpf stub */
-enum hrtimer_restart bpf_timer_callback(struct hrtimer *timer __maybe_unused)
+static enum hrtimer_restart bpf_timer_callback(struct hrtimer *timer __maybe_unused)
 {
 	return HRTIMER_NORESTART;
 }


* Re: [RFC PATCH] bpf: Add new bpf map type for timer
  2015-10-19  5:34 [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
  2015-10-19  6:30 ` kbuild test robot
  2015-10-19  6:30 ` [RFC PATCH] bpf: bpf_timer_callback() can be static kbuild test robot
@ 2015-10-21 10:02 ` He Kuang
  2015-10-21 10:20   ` Ingo Molnar
  2015-10-21 19:53   ` Alexei Starovoitov
  2 siblings, 2 replies; 8+ messages in thread
From: He Kuang @ 2015-10-21 10:02 UTC (permalink / raw)
  To: ast, davem, daniel, mingo, rostedt, xiakaixu, ast; +Cc: wangnan0, linux-kernel

ping and add ast@plumgrid.com, what's your opinion on this?

On 2015/10/19 13:34, He Kuang wrote:
> This patch implements a timer map type inherited from the array map. eBPF
> programs can deploy a timer by updating an entry in the timer map, and
> destroy it by deleting the entry. The timer delay (in ns) is set by
> updating the value field of the entry.
>
> Currently, an intentionally empty function is called when the timer is
> triggered; eBPF programs can then be attached to this function to
> perform customized actions.
>
> Signed-off-by: He Kuang <hekuang@huawei.com>
> ---
>
> Here's a hypothetical scenario to illustrate the use of timer map.
>
> A video frame is updated between frame_refresh_start() and
> frame_refresh_end(). In most cases, the interval between these two
> functions is less than 40ms, but it occasionally exceeds 200ms.
>
> We can set a timer that fires 42ms (slightly larger than 40ms) after
> frame_refresh_start() has executed, and destroy this timer when
> frame_refresh_end() is called. So, in most cases, the timer is not
> triggered, which significantly reduces the amount of trace data.
>
> eBPF progs:
>
>    struct bpf_map_def SEC("maps") timer_map = {
>    	.type = BPF_MAP_TYPE_TIMER_ARRAY,
>    	.key_size = sizeof(int),
>    	.value_size = sizeof(unsigned long long),
>    	.max_entries = 4,
>    };
>
>    SEC("func1=frame_refresh_start")
>    int bpf_prog1(struct pt_regs *ctx)
>    {
>    	int delay_ns = 42 * NSEC_PER_MSEC; /* 42 milliseconds */
>    	unsigned int index = 0;
>
>    	bpf_map_update_elem(&timer_map, &index, &delay_ns, BPF_ANY);
>    	return 0;
>    }
>
>    SEC("func2=frame_refresh_end")
>    int bpf_prog2(struct pt_regs *ctx)
>    {
>    	unsigned int index = 0;
>
>    	bpf_map_delete_elem(&timer_map, &index);
>    	return 0;
>    }
>
>    SEC("func3=bpf_timer_callback")
>    int bpf_prog3(struct pt_regs *ctx, int result)
>    {
>    	char fmt[] = "timer triggered\n";
>
>    	bpf_trace_printk(fmt, sizeof(fmt));
>
>    	/* If we get here, the frame refresh time is beyond the
>    	   threshold; we can switch on more tracepoints or enable more
>    	   detailed trace parameters to diagnose the problem.
>    	 */
>    	return 0;
>    }
>
> Then we can write an executable to test the eBPF progs above; each
> frame update that exceeds the timeout will trigger the timer.
>
> ---
>   include/uapi/linux/bpf.h |   1 +
>   kernel/bpf/arraymap.c    | 161 +++++++++++++++++++++++++++++++++++++++++++----
>   2 files changed, 149 insertions(+), 13 deletions(-)
>
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 92a48e2..c41c80e 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -115,6 +115,7 @@ enum bpf_map_type {
>   	BPF_MAP_TYPE_ARRAY,
>   	BPF_MAP_TYPE_PROG_ARRAY,
>   	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
> +	BPF_MAP_TYPE_TIMER_ARRAY,
>   };
>
>   enum bpf_prog_type {
> diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
> index 29ace10..1e03c70 100644
> --- a/kernel/bpf/arraymap.c
> +++ b/kernel/bpf/arraymap.c
> @@ -16,18 +16,10 @@
>   #include <linux/mm.h>
>   #include <linux/filter.h>
>
> -/* Called from syscall */
> -static struct bpf_map *array_map_alloc(union bpf_attr *attr)
> +static struct bpf_map *__array_map_alloc(union bpf_attr *attr, u32 elem_size)
>   {
>   	struct bpf_array *array;
> -	u32 elem_size, array_size;
> -
> -	/* check sanity of attributes */
> -	if (attr->max_entries == 0 || attr->key_size != 4 ||
> -	    attr->value_size == 0)
> -		return ERR_PTR(-EINVAL);
> -
> -	elem_size = round_up(attr->value_size, 8);
> +	u32 array_size;
>
>   	/* check round_up into zero and u32 overflow */
>   	if (elem_size == 0 ||
> @@ -54,6 +46,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
>   	return &array->map;
>   }
>
> +/* Called from syscall */
> +static struct bpf_map *array_map_alloc(union bpf_attr *attr)
> +{
> +	u32 elem_size;
> +
> +	/* check sanity of attributes */
> +	if (attr->max_entries == 0 || attr->key_size != 4 ||
> +	    attr->value_size == 0)
> +		return ERR_PTR(-EINVAL);
> +
> +	elem_size = round_up(attr->value_size, 8);
> +	return __array_map_alloc(attr, elem_size);
> +}
> +
>   /* Called from syscall or from eBPF program */
>   static void *array_map_lookup_elem(struct bpf_map *map, void *key)
>   {
> @@ -171,7 +177,7 @@ static void fd_array_map_free(struct bpf_map *map)
>   	kvfree(array);
>   }
>
> -static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
> +static void *empty_array_map_lookup_elem(struct bpf_map *map, void *key)
>   {
>   	return NULL;
>   }
> @@ -255,7 +261,7 @@ static const struct bpf_map_ops prog_array_ops = {
>   	.map_alloc = fd_array_map_alloc,
>   	.map_free = fd_array_map_free,
>   	.map_get_next_key = array_map_get_next_key,
> -	.map_lookup_elem = fd_array_map_lookup_elem,
> +	.map_lookup_elem = empty_array_map_lookup_elem,
>   	.map_update_elem = fd_array_map_update_elem,
>   	.map_delete_elem = fd_array_map_delete_elem,
>   	.map_fd_get_ptr = prog_fd_array_get_ptr,
> @@ -312,7 +318,7 @@ static const struct bpf_map_ops perf_event_array_ops = {
>   	.map_alloc = fd_array_map_alloc,
>   	.map_free = perf_event_array_map_free,
>   	.map_get_next_key = array_map_get_next_key,
> -	.map_lookup_elem = fd_array_map_lookup_elem,
> +	.map_lookup_elem = empty_array_map_lookup_elem,
>   	.map_update_elem = fd_array_map_update_elem,
>   	.map_delete_elem = fd_array_map_delete_elem,
>   	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
> @@ -330,3 +336,132 @@ static int __init register_perf_event_array_map(void)
>   	return 0;
>   }
>   late_initcall(register_perf_event_array_map);
> +
> +/* Intended empty function as ebpf stub */
> +enum hrtimer_restart bpf_timer_callback(struct hrtimer *timer __maybe_unused)
> +{
> +	return HRTIMER_NORESTART;
> +}
> +
> +static void timer_array_map_init_timers(struct bpf_map *map)
> +{
> +	struct bpf_array *array = container_of(map, struct bpf_array, map);
> +	int i;
> +	struct hrtimer *timer;
> +
> +	/* init all timer */
> +	for (i = 0; i < array->map.max_entries; i++) {
> +		timer = (struct hrtimer *)(array->value +
> +					   array->elem_size * i);
> +		hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
> +		timer->function = bpf_timer_callback;
> +	}
> +}
> +
> +static struct bpf_map *timer_array_map_alloc(union bpf_attr *attr)
> +{
> +	u32 elem_size;
> +	struct bpf_map *map;
> +
> +	if (attr->value_size != sizeof(u64))
> +		return ERR_PTR(-EINVAL);
> +
> +	/* check sanity of attributes */
> +	if (attr->max_entries == 0 || attr->key_size != 4 ||
> +	    attr->value_size == 0)
> +		return ERR_PTR(-EINVAL);
> +
> +	elem_size = round_up(sizeof(struct hrtimer), 8);
> +	map = __array_map_alloc(attr, elem_size);
> +	if (IS_ERR(map))
> +		return map;
> +
> +	timer_array_map_init_timers(map);
> +
> +	return map;
> +}
> +
> +static void timer_array_map_free(struct bpf_map *map)
> +{
> +	struct bpf_array *array = container_of(map, struct bpf_array, map);
> +	int i;
> +	struct hrtimer *timer;
> +
> +	synchronize_rcu();
> +
> +	/* cancel all untriggered timer */
> +	for (i = 0; i < array->map.max_entries; i++) {
> +		timer = (struct hrtimer *)(array->value +
> +					   array->elem_size * i);
> +		hrtimer_cancel(timer);
> +	}
> +	kvfree(array);
> +}
> +
> +/* Timer activate */
> +static int timer_array_map_update_elem(struct bpf_map *map, void *key,
> +				       void *value, u64 map_flags)
> +{
> +	struct bpf_array *array = container_of(map, struct bpf_array, map);
> +	u32 index = *(u32 *)key;
> +	struct hrtimer *timer;
> +	int delay_ns = *(u64 *)value;
> +	ktime_t kt;
> +
> +	if (map_flags > BPF_EXIST)
> +		/* unknown flags */
> +		return -EINVAL;
> +
> +	if (index >= array->map.max_entries)
> +		/* all elements were pre-allocated, cannot insert a new one */
> +		return -E2BIG;
> +
> +	if (map_flags == BPF_NOEXIST)
> +		/* all elements already exist */
> +		return -EEXIST;
> +
> +	timer = (struct hrtimer *)(array->value + array->elem_size * index);
> +	kt = ktime_set(0, delay_ns);
> +	hrtimer_start(timer, kt, HRTIMER_MODE_REL);
> +
> +	return 0;
> +}
> +
> +/* Timer inactivate */
> +static int timer_array_map_delete_elem(struct bpf_map *map, void *key)
> +{
> +	struct bpf_array *array = container_of(map, struct bpf_array, map);
> +	u32 index = *(u32 *)key;
> +	struct hrtimer *timer;
> +
> +	if (index >= array->map.max_entries)
> +		return -E2BIG;
> +
> +	timer = (struct hrtimer *)(array->value + array->elem_size * index);
> +
> +	if (hrtimer_try_to_cancel(timer) >= 0)
> +		return 0;
> +	else
> +		return -EBUSY;
> +}
> +
> +static const struct bpf_map_ops timer_array_ops = {
> +	.map_alloc = timer_array_map_alloc,
> +	.map_free = timer_array_map_free,
> +	.map_get_next_key = array_map_get_next_key,
> +	.map_lookup_elem = empty_array_map_lookup_elem,
> +	.map_update_elem = timer_array_map_update_elem,
> +	.map_delete_elem = timer_array_map_delete_elem,
> +};
> +
> +static struct bpf_map_type_list timer_array_type __read_mostly = {
> +	.ops = &timer_array_ops,
> +	.type = BPF_MAP_TYPE_TIMER_ARRAY,
> +};
> +
> +static int __init register_timer_array_map(void)
> +{
> +	bpf_register_map_type(&timer_array_type);
> +	return 0;
> +}
> +late_initcall(register_timer_array_map);
>



* Re: [RFC PATCH] bpf: Add new bpf map type for timer
  2015-10-21 10:02 ` [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
@ 2015-10-21 10:20   ` Ingo Molnar
  2015-10-21 10:38     ` Wangnan (F)
  2015-10-21 19:53   ` Alexei Starovoitov
  1 sibling, 1 reply; 8+ messages in thread
From: Ingo Molnar @ 2015-10-21 10:20 UTC (permalink / raw)
  To: He Kuang
  Cc: ast, davem, daniel, rostedt, xiakaixu, ast, wangnan0, linux-kernel


* He Kuang <hekuang@huawei.com> wrote:

> ping and add ast@plumgrid.com, what's your opinion on this?

Firstly, two days isn't nearly enough for a 'review timeout', secondly, have you 
seen the kbuild test reports?

Thirdly, I suspect others will do a deeper review, but even stylistically the 
patch is a bit weird, for example these kinds of unstructured struct initializers 
are annoying:

> >   struct bpf_map_def SEC("maps") timer_map = {
> >   	.type = BPF_MAP_TYPE_TIMER_ARRAY,
> >   	.key_size = sizeof(int),
> >   	.value_size = sizeof(unsigned long long),
> >   	.max_entries = 4,
> >   };

> >  	.map_alloc = fd_array_map_alloc,
> >  	.map_free = fd_array_map_free,
> >  	.map_get_next_key = array_map_get_next_key,
> >-	.map_lookup_elem = fd_array_map_lookup_elem,
> >+	.map_lookup_elem = empty_array_map_lookup_elem,
> >  	.map_update_elem = fd_array_map_update_elem,
> >  	.map_delete_elem = fd_array_map_delete_elem,
> >  	.map_fd_get_ptr = prog_fd_array_get_ptr,
> >@@ -312,7 +318,7 @@ static const struct bpf_map_ops perf_event_array_ops = {
> >  	.map_alloc = fd_array_map_alloc,
> >  	.map_free = perf_event_array_map_free,
> >  	.map_get_next_key = array_map_get_next_key,
> >-	.map_lookup_elem = fd_array_map_lookup_elem,
> >+	.map_lookup_elem = empty_array_map_lookup_elem,
> >  	.map_update_elem = fd_array_map_update_elem,
> >  	.map_delete_elem = fd_array_map_delete_elem,
> >  	.map_fd_get_ptr = perf_event_fd_array_get_ptr,

> >+static const struct bpf_map_ops timer_array_ops = {
> >+	.map_alloc = timer_array_map_alloc,
> >+	.map_free = timer_array_map_free,
> >+	.map_get_next_key = array_map_get_next_key,
> >+	.map_lookup_elem = empty_array_map_lookup_elem,
> >+	.map_update_elem = timer_array_map_update_elem,
> >+	.map_delete_elem = timer_array_map_delete_elem,
> >+};
> >+
> >+static struct bpf_map_type_list timer_array_type __read_mostly = {
> >+	.ops = &timer_array_ops,
> >+	.type = BPF_MAP_TYPE_TIMER_ARRAY,
> >+};

Please align initializations vertically, so the second column becomes readable, 
patterns in them become easy to see and individual entries become easier to 
compare.

See for example kernel/sched/core.c:

struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc      = cpu_cgroup_css_alloc,
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
        .css_offline    = cpu_cgroup_css_offline,
        .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
        .legacy_cftypes = cpu_files,
        .early_init     = 1,
};

That's a _lot_ more readable than:

struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc = cpu_cgroup_css_alloc,
        .css_free = cpu_cgroup_css_free,
        .css_online = cpu_cgroup_css_online,
        .css_offline = cpu_cgroup_css_offline,
        .fork = cpu_cgroup_fork,
        .can_attach = cpu_cgroup_attach,
        .attach = cpu_cgroup_attach,
        .exit = cpu_cgroup_exit,
        .legacy_cftypes = cpu_files,
        .early_init = 1,
};

right? For example I've hidden a small initialization bug into the second variant, 
how much time does it take for you to notice it?

Thanks,

	Ingo


* Re: [RFC PATCH] bpf: Add new bpf map type for timer
  2015-10-21 10:20   ` Ingo Molnar
@ 2015-10-21 10:38     ` Wangnan (F)
  2015-10-21 10:47       ` Ingo Molnar
  0 siblings, 1 reply; 8+ messages in thread
From: Wangnan (F) @ 2015-10-21 10:38 UTC (permalink / raw)
  To: Ingo Molnar, He Kuang
  Cc: ast, davem, daniel, rostedt, xiakaixu, ast, linux-kernel



On 2015/10/21 18:20, Ingo Molnar wrote:
> * He Kuang <hekuang@huawei.com> wrote:
>
>> ping and add ast@plumgrid.com, what's your opinion on this?
> Firstly, two days isn't nearly enough for a 'review timeout', secondly, have you
> seen the kbuild test reports?
>
> Thirdly, I suspect others will do a deeper review, but even stylistically the
> patch is a bit weird, for example these kinds of unstructured struct initializers
> are annoying:
>
>>>    struct bpf_map_def SEC("maps") timer_map = {
>>>    	.type = BPF_MAP_TYPE_TIMER_ARRAY,
>>>    	.key_size = sizeof(int),
>>>    	.value_size = sizeof(unsigned long long),
>>>    	.max_entries = 4,
>>>    };
>>>   	.map_alloc = fd_array_map_alloc,
>>>   	.map_free = fd_array_map_free,
>>>   	.map_get_next_key = array_map_get_next_key,
>>> -	.map_lookup_elem = fd_array_map_lookup_elem,
>>> +	.map_lookup_elem = empty_array_map_lookup_elem,
>>>   	.map_update_elem = fd_array_map_update_elem,
>>>   	.map_delete_elem = fd_array_map_delete_elem,
>>>   	.map_fd_get_ptr = prog_fd_array_get_ptr,
>>> @@ -312,7 +318,7 @@ static const struct bpf_map_ops perf_event_array_ops = {
>>>   	.map_alloc = fd_array_map_alloc,
>>>   	.map_free = perf_event_array_map_free,
>>>   	.map_get_next_key = array_map_get_next_key,
>>> -	.map_lookup_elem = fd_array_map_lookup_elem,
>>> +	.map_lookup_elem = empty_array_map_lookup_elem,
>>>   	.map_update_elem = fd_array_map_update_elem,
>>>   	.map_delete_elem = fd_array_map_delete_elem,
>>>   	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
>>> +static const struct bpf_map_ops timer_array_ops = {
>>> +	.map_alloc = timer_array_map_alloc,
>>> +	.map_free = timer_array_map_free,
>>> +	.map_get_next_key = array_map_get_next_key,
>>> +	.map_lookup_elem = empty_array_map_lookup_elem,
>>> +	.map_update_elem = timer_array_map_update_elem,
>>> +	.map_delete_elem = timer_array_map_delete_elem,
>>> +};
>>> +
>>> +static struct bpf_map_type_list timer_array_type __read_mostly = {
>>> +	.ops = &timer_array_ops,
>>> +	.type = BPF_MAP_TYPE_TIMER_ARRAY,
>>> +};
> Please align initializations vertically, so the second column becomes readable,
> patterns in them become easy to see and individual entries become easier to
> compare.
>
> See for example kernel/sched/core.c:
>
> struct cgroup_subsys cpu_cgrp_subsys = {
>          .css_alloc      = cpu_cgroup_css_alloc,
>          .css_free       = cpu_cgroup_css_free,
>          .css_online     = cpu_cgroup_css_online,
>          .css_offline    = cpu_cgroup_css_offline,
>          .fork           = cpu_cgroup_fork,
>          .can_attach     = cpu_cgroup_can_attach,
>          .attach         = cpu_cgroup_attach,
>          .exit           = cpu_cgroup_exit,
>          .legacy_cftypes = cpu_files,
>          .early_init     = 1,
> };
>
> That's a _lot_ more readable than:
>
> struct cgroup_subsys cpu_cgrp_subsys = {
>          .css_alloc = cpu_cgroup_css_alloc,
>          .css_free = cpu_cgroup_css_free,
>          .css_online = cpu_cgroup_css_online,
>          .css_offline = cpu_cgroup_css_offline,
>          .fork = cpu_cgroup_fork,
>          .can_attach = cpu_cgroup_attach,

Here :)

>          .attach = cpu_cgroup_attach,
>          .exit = cpu_cgroup_exit,
>          .legacy_cftypes = cpu_files,
>          .early_init = 1,
> };
>
> right? For example I've hidden a small initialization bug into the second variant,
> how much time does it take for you to notice it?
>
> Thanks,
>
> 	Ingo




* Re: [RFC PATCH] bpf: Add new bpf map type for timer
  2015-10-21 10:38     ` Wangnan (F)
@ 2015-10-21 10:47       ` Ingo Molnar
  0 siblings, 0 replies; 8+ messages in thread
From: Ingo Molnar @ 2015-10-21 10:47 UTC (permalink / raw)
  To: Wangnan (F)
  Cc: He Kuang, ast, davem, daniel, rostedt, xiakaixu, ast, linux-kernel


* Wangnan (F) <wangnan0@huawei.com> wrote:

> 
> 
> On 2015/10/21 18:20, Ingo Molnar wrote:
> >* He Kuang <hekuang@huawei.com> wrote:
> >
> >>ping and add ast@plumgrid.com, what's your opinion on this?
> >Firstly, two days isn't nearly enough for a 'review timeout', secondly, have you
> >seen the kbuild test reports?
> >
> >Thirdly, I suspect others will do a deeper review, but even stylistically the
> >patch is a bit weird, for example these kinds of unstructured struct initializers
> >are annoying:
> >
> >>>   struct bpf_map_def SEC("maps") timer_map = {
> >>>   	.type = BPF_MAP_TYPE_TIMER_ARRAY,
> >>>   	.key_size = sizeof(int),
> >>>   	.value_size = sizeof(unsigned long long),
> >>>   	.max_entries = 4,
> >>>   };
> >>>  	.map_alloc = fd_array_map_alloc,
> >>>  	.map_free = fd_array_map_free,
> >>>  	.map_get_next_key = array_map_get_next_key,
> >>>-	.map_lookup_elem = fd_array_map_lookup_elem,
> >>>+	.map_lookup_elem = empty_array_map_lookup_elem,
> >>>  	.map_update_elem = fd_array_map_update_elem,
> >>>  	.map_delete_elem = fd_array_map_delete_elem,
> >>>  	.map_fd_get_ptr = prog_fd_array_get_ptr,
> >>>@@ -312,7 +318,7 @@ static const struct bpf_map_ops perf_event_array_ops = {
> >>>  	.map_alloc = fd_array_map_alloc,
> >>>  	.map_free = perf_event_array_map_free,
> >>>  	.map_get_next_key = array_map_get_next_key,
> >>>-	.map_lookup_elem = fd_array_map_lookup_elem,
> >>>+	.map_lookup_elem = empty_array_map_lookup_elem,
> >>>  	.map_update_elem = fd_array_map_update_elem,
> >>>  	.map_delete_elem = fd_array_map_delete_elem,
> >>>  	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
> >>>+static const struct bpf_map_ops timer_array_ops = {
> >>>+	.map_alloc = timer_array_map_alloc,
> >>>+	.map_free = timer_array_map_free,
> >>>+	.map_get_next_key = array_map_get_next_key,
> >>>+	.map_lookup_elem = empty_array_map_lookup_elem,
> >>>+	.map_update_elem = timer_array_map_update_elem,
> >>>+	.map_delete_elem = timer_array_map_delete_elem,
> >>>+};
> >>>+
> >>>+static struct bpf_map_type_list timer_array_type __read_mostly = {
> >>>+	.ops = &timer_array_ops,
> >>>+	.type = BPF_MAP_TYPE_TIMER_ARRAY,
> >>>+};
> >Please align initializations vertically, so the second column becomes readable,
> >patterns in them become easy to see and individual entries become easier to
> >compare.
> >
> >See for example kernel/sched/core.c:
> >
> >struct cgroup_subsys cpu_cgrp_subsys = {
> >         .css_alloc      = cpu_cgroup_css_alloc,
> >         .css_free       = cpu_cgroup_css_free,
> >         .css_online     = cpu_cgroup_css_online,
> >         .css_offline    = cpu_cgroup_css_offline,
> >         .fork           = cpu_cgroup_fork,
> >         .can_attach     = cpu_cgroup_can_attach,
> >         .attach         = cpu_cgroup_attach,
> >         .exit           = cpu_cgroup_exit,
> >         .legacy_cftypes = cpu_files,
> >         .early_init     = 1,
> >};
> >
> >That's a _lot_ more readable than:
> >
> >struct cgroup_subsys cpu_cgrp_subsys = {
> >         .css_alloc = cpu_cgroup_css_alloc,
> >         .css_free = cpu_cgroup_css_free,
> >         .css_online = cpu_cgroup_css_online,
> >         .css_offline = cpu_cgroup_css_offline,
> >         .fork = cpu_cgroup_fork,
> >         .can_attach = cpu_cgroup_attach,
> 
> Here :)
> 
> >         .attach = cpu_cgroup_attach,
> >         .exit = cpu_cgroup_exit,
> >         .legacy_cftypes = cpu_files,
> >         .early_init = 1,
> >};
> >
> >right? For example I've hidden a small initialization bug into the second variant,
> >how much time does it take for you to notice it?

Correct, so that was 18 minutes ;-)

The bug should be easier to find in this form:

struct cgroup_subsys cpu_cgrp_subsys = {
         .css_alloc      = cpu_cgroup_css_alloc,
         .css_free       = cpu_cgroup_css_free,
         .css_online     = cpu_cgroup_css_online,
         .css_offline    = cpu_cgroup_css_offline,
         .fork           = cpu_cgroup_fork,
         .can_attach     = cpu_cgroup_attach,
         .attach         = cpu_cgroup_attach,
         .exit           = cpu_cgroup_exit,
         .legacy_cftypes = cpu_files,
         .early_init     = 1,
};

as there's a visual anomaly at a glance already, if you look carefully enough.

Agreed? These kinds of visual clues get hidden if the vertical alignment is 
missing.

Thanks,

	Ingo


* Re: [RFC PATCH] bpf: Add new bpf map type for timer
  2015-10-21 10:02 ` [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
  2015-10-21 10:20   ` Ingo Molnar
@ 2015-10-21 19:53   ` Alexei Starovoitov
  1 sibling, 0 replies; 8+ messages in thread
From: Alexei Starovoitov @ 2015-10-21 19:53 UTC (permalink / raw)
  To: He Kuang, ast, davem, daniel, mingo, rostedt, xiakaixu
  Cc: wangnan0, linux-kernel

On 10/21/15 3:02 AM, He Kuang wrote:
> Here's a hypothetical scenario to illustrate the use of timer map.
>
> A video frame is updated between frame_refresh_start() and
> frame_refresh_end(). In most cases, the interval between these two
> functions is less than 40ms, but it occasionally exceeds 200ms.
>
> We can set a timer that fires 42ms (slightly larger than 40ms) after
> frame_refresh_start() has executed, and destroy this timer when
> frame_refresh_end() is called. So, in most cases, the timer is not
> triggered, which significantly reduces the amount of trace data.

As a feature it sounds useful, but the implementation is not acceptable.
You cannot have one callback stub for all users of this feature.



Thread overview: 8+ messages
2015-10-19  5:34 [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
2015-10-19  6:30 ` kbuild test robot
2015-10-19  6:30 ` [RFC PATCH] bpf: bpf_timer_callback() can be static kbuild test robot
2015-10-21 10:02 ` [RFC PATCH] bpf: Add new bpf map type for timer He Kuang
2015-10-21 10:20   ` Ingo Molnar
2015-10-21 10:38     ` Wangnan (F)
2015-10-21 10:47       ` Ingo Molnar
2015-10-21 19:53   ` Alexei Starovoitov
