[3/3] blk-mq: Use llist_head for blk_cpu_done

Message ID 20210123201027.3262800-4-bigeasy@linutronix.de
State New, archived
Series
  • blk-mq: Don't complete in IRQ, use llist_head

Commit Message

Sebastian Andrzej Siewior Jan. 23, 2021, 8:10 p.m. UTC
With llist_head it is possible to avoid the locking (the irq-off region)
when items are added. Items can therefore be added on a remote CPU
without additional locking.
llist_add() returns true if the list was previously empty. This can be
used to invoke the SMP function call / raise the softirq only if the
first item was added (otherwise one is already pending).
This simplifies the code a little and reduces the IRQ-off regions.
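
The pattern, for illustration only (the item/handler names below are
made up and not part of the patch), is:

	/* Sketch of the generic llist pattern, not patch code. */
	static LLIST_HEAD(pending);

	static void producer(struct item *item)
	{
		/* Lockless; safe from any context, including remote CPUs. */
		if (llist_add(&item->node, &pending))
			notify_consumer();	/* only the first add notifies */
	}

	static void consumer(void)
	{
		struct llist_node *head = llist_del_all(&pending);
		struct item *item, *next;

		/* llist_add() builds a LIFO; restore submission order. */
		head = llist_reverse_order(head);
		llist_for_each_entry_safe(item, next, head, node)
			process(item);
	}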

blk_mq_raise_softirq() needs a preempt-disable section to ensure the
request is enqueued on the same CPU on which the softirq is raised.
Some callers (e.g. USB-storage) invoke this path in preemptible context.
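
To illustrate the race that the preempt-disable section closes (sketch
only, not code from the patch):

	list = this_cpu_ptr(&blk_cpu_done);	/* runs on CPU 0 */
	llist_add(&rq->ipi_list, list);		/* rq queued on CPU 0's list */
	/* <- preempted here, task migrates to CPU 1 */
	raise_softirq(BLOCK_SOFTIRQ);		/* raised on CPU 1 only */

CPU 0 would then hold a completed request on its list without a raised
softirq to process it.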

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 block/blk-mq.c         | 97 ++++++++++++++++++------------------------
 include/linux/blkdev.h |  2 +-
 2 files changed, 42 insertions(+), 57 deletions(-)

Comments

Christoph Hellwig Jan. 25, 2021, 8:30 a.m. UTC | #1
> +static void blk_mq_complete_send_ipi(struct request *rq)
> +{
> +	struct llist_head *list;
> +	unsigned int cpu;
> +
> +	cpu = rq->mq_ctx->cpu;
> +	list = &per_cpu(blk_cpu_done, cpu);
> +	if (llist_add(&rq->ipi_list, list)) {
> +		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> +		smp_call_function_single_async(cpu, &rq->csd);
> +	}
> +}

Nit: it would be nice to initialize cpu and list in the declaration
lines.

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>
Sebastian Andrzej Siewior Jan. 25, 2021, 8:32 a.m. UTC | #2
On 2021-01-25 08:30:12 [+0000], Christoph Hellwig wrote:
> > +static void blk_mq_complete_send_ipi(struct request *rq)
> > +{
> > +	struct llist_head *list;
> > +	unsigned int cpu;
> > +
> > +	cpu = rq->mq_ctx->cpu;
> > +	list = &per_cpu(blk_cpu_done, cpu);
> > +	if (llist_add(&rq->ipi_list, list)) {
> > +		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> > +		smp_call_function_single_async(cpu, &rq->csd);
> > +	}
> > +}
> 
> Nit: it would be nice to initialize cpu and list in the declaration
> lines.

Why? They get initialized later.

> Otherwise looks good:
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>

Sebastian
Christoph Hellwig Jan. 25, 2021, 8:39 a.m. UTC | #3
On Mon, Jan 25, 2021 at 09:32:04AM +0100, Sebastian Andrzej Siewior wrote:
> On 2021-01-25 08:30:12 [+0000], Christoph Hellwig wrote:
> > > +static void blk_mq_complete_send_ipi(struct request *rq)
> > > +{
> > > +	struct llist_head *list;
> > > +	unsigned int cpu;
> > > +
> > > +	cpu = rq->mq_ctx->cpu;
> > > +	list = &per_cpu(blk_cpu_done, cpu);
> > > +	if (llist_add(&rq->ipi_list, list)) {
> > > +		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> > > +		smp_call_function_single_async(cpu, &rq->csd);
> > > +	}
> > > +}
> > 
> > Nit: it would be nice to initialize cpu and list in the declaration
> > lines.
> 
> Why? They get initialized later.

Because:

	unsigned int cpu = rq->mq_ctx->cpu;
	struct llist_head *list = &per_cpu(blk_cpu_done, cpu);

is a lot easier to follow than:

	struct llist_head *list;
	unsigned int cpu;

	cpu = rq->mq_ctx->cpu;
	list = &per_cpu(blk_cpu_done, cpu);

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 90348ae518461..463de2981df8a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@ 
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -567,68 +567,29 @@  void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *list)
 {
-	struct list_head *cpu_list, local_list;
+	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
+	struct request *rq, *next;
 
-	local_irq_disable();
-	cpu_list = this_cpu_ptr(&blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
-
-	while (!list_empty(&local_list)) {
-		struct request *rq;
-
-		rq = list_entry(local_list.next, struct request, ipi_list);
-		list_del_init(&rq->ipi_list);
+	llist_for_each_entry_safe(rq, next, entry, ipi_list)
 		rq->q->mq_ops->complete(rq);
-	}
 }
 
-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *list;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	list = this_cpu_ptr(&blk_cpu_done);
-	list_add_tail(&rq->ipi_list, list);
-
-	/*
-	 * If the list only contains our just added request, signal a raise of
-	 * the softirq.  If there are already entries there, someone already
-	 * raised the irq but it hasn't run yet.
-	 */
-	if (list->next == &rq->ipi_list)
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_restore(flags);
+	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
 {
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	local_irq_disable();
-	list_splice_init(&per_cpu(blk_cpu_done, cpu),
-			 this_cpu_ptr(&blk_cpu_done));
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_enable();
-
+	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
 	return 0;
 }
 
-
 static void __blk_mq_complete_request_remote(void *data)
 {
-	struct request *rq = data;
-
-	blk_mq_trigger_softirq(rq);
+	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
 }
 
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -657,6 +618,30 @@  static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	return cpu_online(rq->mq_ctx->cpu);
 }
 
+static void blk_mq_complete_send_ipi(struct request *rq)
+{
+	struct llist_head *list;
+	unsigned int cpu;
+
+	cpu = rq->mq_ctx->cpu;
+	list = &per_cpu(blk_cpu_done, cpu);
+	if (llist_add(&rq->ipi_list, list)) {
+		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+		smp_call_function_single_async(cpu, &rq->csd);
+	}
+}
+
+static void blk_mq_raise_softirq(struct request *rq)
+{
+	struct llist_head *list;
+
+	preempt_disable();
+	list = this_cpu_ptr(&blk_cpu_done);
+	if (llist_add(&rq->ipi_list, list))
+		raise_softirq(BLOCK_SOFTIRQ);
+	preempt_enable();
+}
+
 bool blk_mq_complete_request_remote(struct request *rq)
 {
 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
@@ -669,15 +654,15 @@  bool blk_mq_complete_request_remote(struct request *rq)
 		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
-		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
-	} else {
-		if (rq->q->nr_hw_queues > 1)
-			return false;
-		blk_mq_trigger_softirq(rq);
+		blk_mq_complete_send_ipi(rq);
+		return true;
 	}
 
-	return true;
+	if (rq->q->nr_hw_queues == 1) {
+		blk_mq_raise_softirq(rq);
+		return true;
+	}
+	return false;
 }
 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
 
@@ -3892,7 +3877,7 @@  static int __init blk_mq_init(void)
 	int i;
 
 	for_each_possible_cpu(i)
-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+		init_llist_head(&per_cpu(blk_cpu_done, i));
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f94ee3089e015..89a444c5a5833 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -153,7 +153,7 @@  struct request {
 	 */
 	union {
 		struct hlist_node hash;	/* merge hash */
-		struct list_head ipi_list;
+		struct llist_node ipi_list;
 	};
 
 	/*