From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: David Miller <davem@davemloft.net>,
	bpf@vger.kernel.org, netdev@vger.kernel.org,
	Alexei Starovoitov <ast@kernel.org>,
	Daniel Borkmann <daniel@iogearbox.net>,
	Sebastian Sewior <bigeasy@linutronix.de>,
	Peter Zijlstra <peterz@infradead.org>,
	Clark Williams <williams@redhat.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Juri Lelli <juri.lelli@redhat.com>,
	Ingo Molnar <mingo@kernel.org>
Subject: [RFC patch 16/19] bpf: Factor out hashtab bucket lock operations
Date: Fri, 14 Feb 2020 14:39:33 +0100	[thread overview]
Message-ID: <20200214161504.541509611@linutronix.de> (raw)
In-Reply-To: <20200214133917.304937432@linutronix.de>

As a preparation for making the BPF locking RT friendly, factor out the
hash bucket lock operations into inline functions. This allows the
necessary RT modifications to be made in one place instead of sprinkling
them all over the code. No functional change.

The htab argument of the lock/unlock functions is unused for now; it will
be used in the next step, which adds PREEMPT_RT support.
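
For illustration only, a minimal sketch of how such a lock helper could be
made RT aware once that step lands. This sketch is an assumption, not part
of this patch: the htab_use_raw_lock() predicate and the separate
raw_lock/lock bucket members do not exist in the code below.

	static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
						     struct bucket *b)
	{
		unsigned long flags;

		/*
		 * Hypothetical: keep the raw spinlock for maps which must work
		 * from truly atomic contexts and use a PREEMPT_RT compatible
		 * spinlock for everything else.
		 */
		if (htab_use_raw_lock(htab))
			raw_spin_lock_irqsave(&b->raw_lock, flags);
		else
			spin_lock_irqsave(&b->lock, flags);
		return flags;
	}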

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/bpf/hashtab.c |   69 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 46 insertions(+), 23 deletions(-)

--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -87,6 +87,32 @@ struct htab_elem {
 	char key[0] __aligned(8);
 };
 
+static void htab_init_buckets(struct bpf_htab *htab)
+{
+	unsigned i;
+
+	for (i = 0; i < htab->n_buckets; i++) {
+		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
+		raw_spin_lock_init(&htab->buckets[i].lock);
+	}
+}
+
+static inline unsigned long htab_lock_bucket(const struct bpf_htab *htab,
+					     struct bucket *b)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&b->lock, flags);
+	return flags;
+}
+
+static inline void htab_unlock_bucket(const struct bpf_htab *htab,
+				      struct bucket *b,
+				      unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&b->lock, flags);
+}
+
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
 
 static bool htab_is_lru(const struct bpf_htab *htab)
@@ -336,8 +362,8 @@ static struct bpf_map *htab_map_alloc(un
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	struct bpf_htab *htab;
-	int err, i;
 	u64 cost;
+	int err;
 
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
@@ -399,10 +425,7 @@ static struct bpf_map *htab_map_alloc(un
 	else
 		htab->hashrnd = get_random_int();
 
-	for (i = 0; i < htab->n_buckets; i++) {
-		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
-		raw_spin_lock_init(&htab->buckets[i].lock);
-	}
+	htab_init_buckets(htab);
 
 	if (prealloc) {
 		err = prealloc_init(htab);
@@ -610,7 +633,7 @@ static bool htab_lru_map_delete_node(voi
 	b = __select_bucket(htab, tgt_l->hash);
 	head = &b->head;
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
 		if (l == tgt_l) {
@@ -618,7 +641,7 @@ static bool htab_lru_map_delete_node(voi
 			break;
 		}
 
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 
 	return l == tgt_l;
 }
@@ -892,7 +915,7 @@ static int htab_map_update_elem(struct b
 		 */
 	}
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -933,7 +956,7 @@ static int htab_map_update_elem(struct b
 	}
 	ret = 0;
 err:
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	return ret;
 }
 
@@ -971,7 +994,7 @@ static int htab_lru_map_update_elem(stru
 		return -ENOMEM;
 	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -990,7 +1013,7 @@ static int htab_lru_map_update_elem(stru
 	ret = 0;
 
 err:
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 
 	if (ret)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
@@ -1025,7 +1048,7 @@ static int __htab_percpu_map_update_elem
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1048,7 +1071,7 @@ static int __htab_percpu_map_update_elem
 	}
 	ret = 0;
 err:
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	return ret;
 }
 
@@ -1088,7 +1111,7 @@ static int __htab_lru_percpu_map_update_
 			return -ENOMEM;
 	}
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1110,7 +1133,7 @@ static int __htab_lru_percpu_map_update_
 	}
 	ret = 0;
 err:
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	if (l_new)
 		bpf_lru_push_free(&htab->lru, &l_new->lru_node);
 	return ret;
@@ -1148,7 +1171,7 @@ static int htab_map_delete_elem(struct b
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1158,7 +1181,7 @@ static int htab_map_delete_elem(struct b
 		ret = 0;
 	}
 
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	return ret;
 }
 
@@ -1180,7 +1203,7 @@ static int htab_lru_map_delete_elem(stru
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	l = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1189,7 +1212,7 @@ static int htab_lru_map_delete_elem(stru
 		ret = 0;
 	}
 
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	if (l)
 		bpf_lru_push_free(&htab->lru, &l->lru_node);
 	return ret;
@@ -1335,7 +1358,7 @@ static int
 	dst_val = values;
 	b = &htab->buckets[batch];
 	head = &b->head;
-	raw_spin_lock_irqsave(&b->lock, flags);
+	flags = htab_lock_bucket(htab, b);
 
 	bucket_cnt = 0;
 	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
@@ -1344,7 +1367,7 @@ static int
 	if (bucket_cnt > (max_count - total)) {
 		if (total == 0)
 			ret = -ENOSPC;
-		raw_spin_unlock_irqrestore(&b->lock, flags);
+		htab_unlock_bucket(htab, b, flags);
 		rcu_read_unlock();
 		this_cpu_dec(bpf_prog_active);
 		migrate_enable();
@@ -1353,7 +1376,7 @@ static int
 
 	if (bucket_cnt > bucket_size) {
 		bucket_size = bucket_cnt;
-		raw_spin_unlock_irqrestore(&b->lock, flags);
+		htab_unlock_bucket(htab, b, flags);
 		rcu_read_unlock();
 		this_cpu_dec(bpf_prog_active);
 		migrate_enable();
@@ -1395,7 +1418,7 @@ static int
 		dst_val += value_size;
 	}
 
-	raw_spin_unlock_irqrestore(&b->lock, flags);
+	htab_unlock_bucket(htab, b, flags);
 	/* If we are not copying data, we can go to next bucket and avoid
 	 * unlocking the rcu.
 	 */

