On 2020-12-08 13:20:04 [+0000], Christoph Hellwig wrote:
> > --- a/block/blk-mq.c
> > +++ b/block/blk-mq.c
> > @@ -41,7 +41,7 @@
> >  #include "blk-mq-sched.h"
> >  #include "blk-rq-qos.h"
> >  
> > +static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
> >  
> >  static void blk_mq_poll_stats_start(struct request_queue *q);
> >  static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
> > @@ -567,68 +567,32 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
> >  }
> >  EXPORT_SYMBOL(blk_mq_end_request);
> >  
> > +static void blk_complete_reqs(struct llist_head *cpu_list)
> >  {
> > +	struct llist_node *entry;
> > +	struct request *rq, *rq_next;
> >  
> > +	entry = llist_del_all(cpu_list);
> > +	entry = llist_reverse_order(entry);
> 
> I find the variable naming and split of the assignments a little
> strange. What about:
> 
> static void blk_complete_reqs(struct llist_head *list)
> {
> 	struct llist_node *first = llist_reverse_order(llist_del_all(list));
> 	struct request *rq, *next;
> 
> ?

Sure.

> > +	llist_for_each_entry_safe(rq, rq_next, entry, ipi_list)
> >  		rq->q->mq_ops->complete(rq);
> >  }
> 
> Aren't some sanitizers going to be unhappy if we never delete the
> request from the list?

I don't think so. If so, there is more to complain about, like
flush_smp_call_function_queue(), delayed_mntput(), irq_work_run_list(),
...

> >  bool blk_mq_complete_request_remote(struct request *rq)
> >  {
> > +	struct llist_head *cpu_list;
> >  	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
> >  
> >  	/*
> > @@ -669,12 +634,22 @@ bool blk_mq_complete_request_remote(struct request *rq)
> >  		return false;
> >  
> >  	if (blk_mq_complete_need_ipi(rq)) {
> > +		unsigned int cpu;
> > +
> > +		cpu = rq->mq_ctx->cpu;
> > +		cpu_list = &per_cpu(blk_cpu_done, cpu);
> > +		if (llist_add(&rq->ipi_list, cpu_list)) {
> > +			INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> > +			smp_call_function_single_async(cpu, &rq->csd);
> > +		}
> 
> I think the above code section inside the conditional should go into a
> little helper instead of being open coded here in the fast path routine.
> I also don't really see the point of the cpu and cpu_list local
> variables.
> 
> >  	} else {
> >  		if (rq->q->nr_hw_queues > 1)
> >  			return false;
> > +		preempt_disable();
> > +		cpu_list = this_cpu_ptr(&blk_cpu_done);
> > +		if (llist_add(&rq->ipi_list, cpu_list))
> > +			raise_softirq(BLOCK_SOFTIRQ);
> > +		preempt_enable();
> 
> I think the section after the return false here also would benefit from
> a little helper with a descriptive name.
> 
> Otherwise this looks good to me.

Please see below.

----->8-------

From: Sebastian Andrzej Siewior
Date: Wed, 28 Oct 2020 11:08:21 +0100
Subject: [PATCH] blk-mq: Use llist_head for blk_cpu_done

With llist_head it is possible to avoid the locking (the irq-off
region) when items are added. This makes it possible to add items on a
remote CPU without additional locking.

llist_add() returns true if the list was previously empty. This can be
used to invoke the SMP function call / raise the softirq only if the
first item was added (otherwise it is already pending). This simplifies
the code a little and reduces the IRQ-off regions.

blk_mq_raise_softirq() needs a preempt-disable section to ensure the
request is enqueued on the same CPU on which the softirq is raised.
Some callers (USB-storage) invoke this path in preemptible context.
Signed-off-by: Sebastian Andrzej Siewior
---
 block/blk-mq.c         | 97 ++++++++++++++++++------------------
 include/linux/blkdev.h |  2 +-
 2 files changed, 42 insertions(+), 57 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9baa681f6ee67..959b45fd41882 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -567,68 +567,29 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *list)
 {
-	struct list_head *cpu_list, local_list;
+	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
+	struct request *rq, *next;
 
-	local_irq_disable();
-	cpu_list = this_cpu_ptr(&blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
-
-	while (!list_empty(&local_list)) {
-		struct request *rq;
-
-		rq = list_entry(local_list.next, struct request, ipi_list);
-		list_del_init(&rq->ipi_list);
+	llist_for_each_entry_safe(rq, next, entry, ipi_list)
 		rq->q->mq_ops->complete(rq);
-	}
 }
 
-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *list;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	list = this_cpu_ptr(&blk_cpu_done);
-	list_add_tail(&rq->ipi_list, list);
-
-	/*
-	 * If the list only contains our just added request, signal a raise of
-	 * the softirq. If there are already entries there, someone already
-	 * raised the irq but it hasn't run yet.
-	 */
-	if (list->next == &rq->ipi_list)
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_restore(flags);
+	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
 {
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	local_irq_disable();
-	list_splice_init(&per_cpu(blk_cpu_done, cpu),
-			 this_cpu_ptr(&blk_cpu_done));
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_enable();
-
+	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
 	return 0;
 }
 
-
 static void __blk_mq_complete_request_remote(void *data)
 {
-	struct request *rq = data;
-
-	blk_mq_trigger_softirq(rq);
+	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
 }
 
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -657,6 +618,30 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 	return cpu_online(rq->mq_ctx->cpu);
 }
 
+static void blk_mq_complete_send_ipi(struct request *rq)
+{
+	struct llist_head *list;
+	unsigned int cpu;
+
+	cpu = rq->mq_ctx->cpu;
+	list = &per_cpu(blk_cpu_done, cpu);
+	if (llist_add(&rq->ipi_list, list)) {
+		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+		smp_call_function_single_async(cpu, &rq->csd);
+	}
+}
+
+static void blk_mq_raise_softirq(struct request *rq)
+{
+	struct llist_head *list;
+
+	preempt_disable();
+	list = this_cpu_ptr(&blk_cpu_done);
+	if (llist_add(&rq->ipi_list, list))
+		raise_softirq(BLOCK_SOFTIRQ);
+	preempt_enable();
+}
+
 bool blk_mq_complete_request_remote(struct request *rq)
 {
 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
@@ -669,15 +654,15 @@ bool blk_mq_complete_request_remote(struct request *rq)
 		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
-		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
-	} else {
-		if (rq->q->nr_hw_queues > 1)
-			return false;
-		blk_mq_trigger_softirq(rq);
+		blk_mq_complete_send_ipi(rq);
+		return true;
 	}
 
-	return true;
+	if (rq->q->nr_hw_queues == 1) {
+		blk_mq_raise_softirq(rq);
+		return true;
+	}
+	return false;
 }
 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
 
@@ -3917,7 +3902,7 @@ static int __init blk_mq_init(void)
 	int i;
 
 	for_each_possible_cpu(i)
-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+		init_llist_head(&per_cpu(blk_cpu_done, i));
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f94ee3089e015..89a444c5a5833 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -153,7 +153,7 @@ struct request {
 	 */
 	union {
 		struct hlist_node hash;	/* merge hash */
-		struct list_head ipi_list;
+		struct llist_node ipi_list;
 	};
 
 	/*
-- 
2.29.2
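
For readers who want to see the idea behind the patch in isolation, the
following is a minimal single-threaded userspace sketch using C11
atomics. The names (struct lhead, lpush(), ltake_fifo()) are made up
here for illustration; the kernel's actual primitives are llist_add(),
llist_del_all() and llist_reverse_order(). It demonstrates the two
properties the patch relies on: the push reports whether it made the
list non-empty, so only the first producer has to signal the consumer
(the IPI / softirq raise above), and the consumer reverses the grabbed
list to turn the LIFO push order back into FIFO completion order.

/* build with: cc -std=c11 -o llist-demo llist-demo.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int tag;			/* stand-in for the request */
};

struct lhead {
	_Atomic(struct node *) first;
};

/* Like llist_add(): lock-free push, returns true if the list was
 * previously empty. */
static bool lpush(struct lhead *h, struct node *n)
{
	struct node *old = atomic_load(&h->first);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&h->first, &old, n));
	return old == NULL;
}

/* Like llist_del_all() + llist_reverse_order(): grab the whole list
 * atomically, then flip it from LIFO to FIFO before processing. */
static struct node *ltake_fifo(struct lhead *h)
{
	struct node *n = atomic_exchange(&h->first, NULL);
	struct node *rev = NULL;

	while (n) {
		struct node *next = n->next;

		n->next = rev;
		rev = n;
		n = next;
	}
	return rev;
}

int main(void)
{
	struct lhead head = { NULL };
	struct node a = { .tag = 1 }, b = { .tag = 2 };

	/* Only the first add reports "was empty" - this is the one spot
	 * where the patch sends the IPI or raises the softirq. */
	printf("signal consumer? %d\n", lpush(&head, &a));	/* 1 */
	printf("signal consumer? %d\n", lpush(&head, &b));	/* 0 */

	/* The consumer then completes entries in submission order. */
	for (struct node *n = ltake_fifo(&head); n; n = n->next)
		printf("complete %d\n", n->tag);		/* 1, then 2 */
	return 0;
}

Because push and take-all are each a single atomic operation on the
list head, no irq-off or locked section is needed around the enqueue,
which is exactly what the patch exploits.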