From: "Björn Töpel" <bjorn.topel@gmail.com>
To: netdev@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net
Cc: "Björn Töpel" <bjorn.topel@intel.com>,
	bpf@vger.kernel.org, davem@davemloft.net,
	jakub.kicinski@netronome.com, hawk@kernel.org,
	john.fastabend@gmail.com, magnus.karlsson@intel.com,
	jonathan.lemon@gmail.com
Subject: [PATCH bpf-next 2/8] xdp: simplify cpumap cleanup
Date: Wed, 18 Dec 2019 11:53:54 +0100
Message-ID: <20191218105400.2895-3-bjorn.topel@gmail.com>
In-Reply-To: <20191218105400.2895-1-bjorn.topel@gmail.com>

From: Björn Töpel <bjorn.topel@intel.com>

After the RCU flavor consolidation [1], call_rcu() and
synchronize_rcu() also wait for preempt-disable regions (NAPI), in
addition to the read-side critical sections. As a result, the cleanup
code in cpumap can be simplified:

* There is no longer a need to flush in __cpu_map_entry_free(), since
  the flush is guaranteed to have completed by the time the call_rcu()
  callback is triggered.

* When freeing the map, there is no need to explicitly wait for a
  flush. It is guaranteed to have completed once the synchronize_rcu()
  call in cpu_map_free() returns. The ordering both points rely on is
  sketched below.

[1] https://lwn.net/Articles/777036/
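
Spelled out, the ordering looks roughly like this; an illustrative
sketch, paraphrased from kernel/bpf/cpumap.c rather than quoted
verbatim (the lookup/detach lines follow the existing helpers, the
enqueue/flush lines are the ones touched by this patch):

	/* Reader side: runs from NAPI with preemption disabled. With the
	 * consolidated RCU this counts as a read-side section, so it is
	 * ordered against the synchronize_rcu()/call_rcu() below.
	 */
	rcpu = __cpu_map_lookup_elem(map, key);
	bq_enqueue(rcpu, xdpf);		/* may fill the per-cpu bulkq */
	__cpu_map_flush(map);		/* drains it before NAPI returns */

	/* Update/teardown side: once the entry is unpublished and a grace
	 * period has passed, no NAPI poll that could still see it is in
	 * flight, so its bulkq is already empty when the callback runs.
	 */
	old_rcpu = xchg(&cmap->cpu_map[key], NULL);
	if (old_rcpu)
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);

The same argument covers cpu_map_free(): after bpf_clear_redirect_map()
plus synchronize_rcu(), no flush can still be pending, which is why the
wait-for-empty-flush_list loop can go away.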

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
---
 kernel/bpf/cpumap.c | 33 +++++----------------------------
 1 file changed, 5 insertions(+), 28 deletions(-)
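
For reviewers' convenience, __cpu_map_entry_free() with the two hunks
below applied reads as follows (the tail of the comment, which falls
between the hunks, is elided here; its graze->grace typo is fixed
separately in patch 3/8):

	static void __cpu_map_entry_free(struct rcu_head *rcu)
	{
		struct bpf_cpu_map_entry *rcpu;

		/* This cpu_map_entry have been disconnected from map and one
		 * RCU graze-period have elapsed. [...]
		 */
		rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

		free_percpu(rcpu->bulkq);
		/* Cannot kthread_stop() here, last put free rcpu resources */
		put_cpu_map_entry(rcpu);
	}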

diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ef49e17ae47c..fbf176e0a2ab 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -75,7 +75,7 @@ struct bpf_cpu_map {
 	struct list_head __percpu *flush_list;
 };
 
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx);
+static int bq_flush_to_queue(struct xdp_bulk_queue *bq);
 
 static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 {
@@ -399,7 +399,6 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 static void __cpu_map_entry_free(struct rcu_head *rcu)
 {
 	struct bpf_cpu_map_entry *rcpu;
-	int cpu;
 
 	/* This cpu_map_entry have been disconnected from map and one
 	 * RCU graze-period have elapsed.  Thus, XDP cannot queue any
@@ -408,13 +407,6 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
 	 */
 	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);
 
-	/* Flush remaining packets in percpu bulkq */
-	for_each_online_cpu(cpu) {
-		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
-
-		/* No concurrent bq_enqueue can run at this point */
-		bq_flush_to_queue(bq, false);
-	}
 	free_percpu(rcpu->bulkq);
 	/* Cannot kthread_stop() here, last put free rcpu resources */
 	put_cpu_map_entry(rcpu);
@@ -522,18 +514,6 @@ static void cpu_map_free(struct bpf_map *map)
 	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
-	/* To ensure all pending flush operations have completed wait for flush
-	 * list be empty on _all_ cpus. Because the above synchronize_rcu()
-	 * ensures the map is disconnected from the program we can assume no new
-	 * items will be added to the list.
-	 */
-	for_each_online_cpu(cpu) {
-		struct list_head *flush_list = per_cpu_ptr(cmap->flush_list, cpu);
-
-		while (!list_empty(flush_list))
-			cond_resched();
-	}
-
 	/* For cpu_map the remote CPUs can still be using the entries
 	 * (struct bpf_cpu_map_entry).
 	 */
@@ -599,7 +579,7 @@ const struct bpf_map_ops cpu_map_ops = {
 	.map_check_btf		= map_check_no_btf,
 };
 
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx)
+static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
 {
 	struct bpf_cpu_map_entry *rcpu = bq->obj;
 	unsigned int processed = 0, drops = 0;
@@ -620,10 +600,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq, bool in_napi_ctx)
 		err = __ptr_ring_produce(q, xdpf);
 		if (err) {
 			drops++;
-			if (likely(in_napi_ctx))
-				xdp_return_frame_rx_napi(xdpf);
-			else
-				xdp_return_frame(xdpf);
+			xdp_return_frame_rx_napi(xdpf);
 		}
 		processed++;
 	}
@@ -646,7 +623,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-		bq_flush_to_queue(bq, true);
+		bq_flush_to_queue(bq);
 
 	/* Notice, xdp_buff/page MUST be queued here, long enough for
 	 * driver to code invoking us to finished, due to driver
@@ -688,7 +665,7 @@ void __cpu_map_flush(struct bpf_map *map)
 	struct xdp_bulk_queue *bq, *tmp;
 
 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
-		bq_flush_to_queue(bq, true);
+		bq_flush_to_queue(bq);
 
 		/* If already running, costs spin_lock_irqsave + smb_mb */
 		wake_up_process(bq->obj->kthread);
-- 
2.20.1



Thread overview: 32+ messages
2019-12-18 10:53 [PATCH bpf-next 0/8] Simplify xdp_do_redirect_map()/xdp_do_flush_map() and XDP maps Björn Töpel
2019-12-18 10:53 ` [PATCH bpf-next 1/8] xdp: simplify devmap cleanup Björn Töpel
2019-12-18 11:14   ` Toke Høiland-Jørgensen
2019-12-18 10:53 ` Björn Töpel [this message]
2019-12-18 11:15   ` [PATCH bpf-next 2/8] xdp: simplify cpumap cleanup Toke Høiland-Jørgensen
2019-12-18 17:47   ` Jakub Kicinski
2019-12-18 17:48     ` Björn Töpel
2019-12-18 10:53 ` [PATCH bpf-next 3/8] xdp: fix graze->grace type-o in cpumap comments Björn Töpel
2019-12-18 11:18   ` Toke Høiland-Jørgensen
2019-12-18 10:53 ` [PATCH bpf-next 4/8] xsk: make xskmap flush_list common for all map instances Björn Töpel
2019-12-18 11:19   ` Toke Høiland-Jørgensen
2019-12-18 10:53 ` [PATCH bpf-next 5/8] xdp: make devmap " Björn Töpel
2019-12-18 11:19   ` Toke Høiland-Jørgensen
2019-12-18 10:53 ` [PATCH bpf-next 6/8] xdp: make cpumap " Björn Töpel
2019-12-18 11:19   ` Toke Høiland-Jørgensen
2019-12-18 10:53 ` [PATCH bpf-next 7/8] xdp: remove map_to_flush and map swap detection Björn Töpel
2019-12-18 11:20   ` Toke Høiland-Jørgensen
2019-12-18 13:03   ` Jesper Dangaard Brouer
2019-12-18 13:09     ` Björn Töpel
2019-12-18 10:54 ` [PATCH bpf-next 8/8] xdp: simplify __bpf_tx_xdp_map() Björn Töpel
2019-12-18 11:20   ` Toke Høiland-Jørgensen
2019-12-18 11:11 ` [PATCH bpf-next 0/8] Simplify xdp_do_redirect_map()/xdp_do_flush_map() and XDP maps Jesper Dangaard Brouer
2019-12-18 11:39   ` Björn Töpel
2019-12-18 12:03     ` Jesper Dangaard Brouer
2019-12-18 12:18       ` Björn Töpel
2019-12-18 12:32         ` Björn Töpel
2019-12-18 12:40         ` Jesper Dangaard Brouer
2019-12-18 12:48           ` Björn Töpel
2019-12-19  0:39       ` Andrii Nakryiko
2019-12-19 19:33         ` Jesper Dangaard Brouer
2019-12-19 20:08           ` Daniel Borkmann
2019-12-19 22:56             ` Andrii Nakryiko
