* [PATCH V6 1/5] blk-mq: refactor the code of issue request directly
2018-11-13 9:56 [PATCH V6 0/5] blk-mq: refactor and fix on issue request directly Jianchao Wang
@ 2018-11-13 9:56 ` Jianchao Wang
2018-11-13 9:56 ` [PATCH V6 2/5] blk-mq: fix issue directly case when q is stopped or quiesced Jianchao Wang
` (3 subsequent siblings)
4 siblings, 0 replies; 10+ messages in thread
From: Jianchao Wang @ 2018-11-13 9:56 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, linux-block, linux-kernel
Merge blk_mq_try_issue_directly and __blk_mq_try_issue_directly
into one interface to unify the paths that issue requests
directly. The merged interface takes over the request completely:
it will insert it, end it, or do nothing, based on the return value
of .queue_rq and the 'bypass' parameter. The caller therefore does
not need to do any further handling.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---
block/blk-mq.c | 93 ++++++++++++++++++++++++++++------------------------------
1 file changed, 45 insertions(+), 48 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 411be60..14b4d06 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1766,78 +1766,75 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
return ret;
}
-static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
- bool bypass_insert)
+ bool bypass)
{
struct request_queue *q = rq->q;
bool run_queue = true;
+ blk_status_t ret = BLK_STS_RESOURCE;
+ int srcu_idx;
+ hctx_lock(hctx, &srcu_idx);
/*
- * RCU or SRCU read lock is needed before checking quiesced flag.
+ * hctx_lock is needed before checking quiesced flag.
*
- * When queue is stopped or quiesced, ignore 'bypass_insert' from
- * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
- * and avoid driver to try to dispatch again.
+ * When queue is stopped or quiesced, ignore 'bypass', insert
+ * and return BLK_STS_OK to caller, and avoid driver to try to
+ * dispatch again.
*/
- if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+ if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
run_queue = false;
- bypass_insert = false;
- goto insert;
+ bypass = false;
+ goto out_unlock;
}
- if (q->elevator && !bypass_insert)
- goto insert;
+ /*
+ * Bypass the potential scheduler on the bottom device.
+ */
+ if (unlikely(q->elevator && !bypass))
+ goto out_unlock;
- if (!blk_mq_get_dispatch_budget(hctx))
- goto insert;
+ if (unlikely(!blk_mq_get_dispatch_budget(hctx)))
+ goto out_unlock;
- if (!blk_mq_get_driver_tag(rq)) {
+ if (unlikely(!blk_mq_get_driver_tag(rq))) {
blk_mq_put_dispatch_budget(hctx);
- goto insert;
+ goto out_unlock;
}
- return __blk_mq_issue_directly(hctx, rq, cookie);
-insert:
- if (bypass_insert)
- return BLK_STS_RESOURCE;
-
- blk_mq_sched_insert_request(rq, false, run_queue, false);
- return BLK_STS_OK;
-}
-
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
- struct request *rq, blk_qc_t *cookie)
-{
- blk_status_t ret;
- int srcu_idx;
-
- might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+ ret = __blk_mq_issue_directly(hctx, rq, cookie);
- hctx_lock(hctx, &srcu_idx);
+out_unlock:
+ hctx_unlock(hctx, srcu_idx);
- ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
- if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
- blk_mq_sched_insert_request(rq, false, true, false);
- else if (ret != BLK_STS_OK)
- blk_mq_end_request(rq, ret);
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_DEV_RESOURCE:
+ case BLK_STS_RESOURCE:
+ if (!bypass) {
+ blk_mq_sched_insert_request(rq, false, run_queue, false);
+ ret = BLK_STS_OK;
+ }
+ break;
+ default:
+ if (!bypass) {
+ blk_mq_end_request(rq, ret);
+ ret = BLK_STS_OK;
+ }
+ break;
+ }
- hctx_unlock(hctx, srcu_idx);
+ return ret;
}
blk_status_t blk_mq_request_issue_directly(struct request *rq)
{
- blk_status_t ret;
- int srcu_idx;
blk_qc_t unused_cookie;
- struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
- hctx_lock(hctx, &srcu_idx);
- ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
- hctx_unlock(hctx, srcu_idx);
-
- return ret;
+ return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused_cookie, true);
}
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -1958,13 +1955,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (same_queue_rq) {
data.hctx = same_queue_rq->mq_hctx;
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
- &cookie);
+ &cookie, false);
}
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
!data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie, false);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
--
2.7.4
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH V6 2/5] blk-mq: fix issue directly case when q is stopped or quiesced
2018-11-13 9:56 [PATCH V6 0/5] blk-mq: refactor and fix on issue request directly Jianchao Wang
2018-11-13 9:56 ` [PATCH V6 1/5] blk-mq: refactor the code of " Jianchao Wang
@ 2018-11-13 9:56 ` Jianchao Wang
2018-11-13 9:56 ` [PATCH V6 3/5] blk-mq: ensure hctx to be ran on mapped cpu when issue directly Jianchao Wang
` (2 subsequent siblings)
4 siblings, 0 replies; 10+ messages in thread
From: Jianchao Wang @ 2018-11-13 9:56 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, linux-block, linux-kernel
When trying to issue a request directly, if the queue is stopped or
quiesced, 'bypass' will be ignored and BLK_STS_OK will be returned
to the caller so that it does not dispatch the request again. The
request will then be inserted with blk_mq_sched_insert_request.
This is not correct for the dm-rq case, where we should avoid
passing through the underlying path's io scheduler.
To fix it, use blk_mq_request_bypass_insert to insert the request
to hctx->dispatch when we cannot pass through io scheduler but have
to insert.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---
block/blk-mq.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 14b4d06..11c52bb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1772,7 +1772,7 @@ static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
bool bypass)
{
struct request_queue *q = rq->q;
- bool run_queue = true;
+ bool run_queue = true, force = false;
blk_status_t ret = BLK_STS_RESOURCE;
int srcu_idx;
@@ -1786,7 +1786,7 @@ static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
*/
if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
run_queue = false;
- bypass = false;
+ force = true;
goto out_unlock;
}
@@ -1817,6 +1817,9 @@ static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
if (!bypass) {
blk_mq_sched_insert_request(rq, false, run_queue, false);
ret = BLK_STS_OK;
+ } else if (force) {
+ blk_mq_request_bypass_insert(rq, run_queue);
+ ret = BLK_STS_OK;
}
break;
default:
--
2.7.4
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH V6 3/5] blk-mq: ensure hctx to be ran on mapped cpu when issue directly
2018-11-13 9:56 [PATCH V6 0/5] blk-mq: refactor and fix on issue request directly Jianchao Wang
2018-11-13 9:56 ` [PATCH V6 1/5] blk-mq: refactor the code of " Jianchao Wang
2018-11-13 9:56 ` [PATCH V6 2/5] blk-mq: fix issue directly case when q is stopped or quiesced Jianchao Wang
@ 2018-11-13 9:56 ` Jianchao Wang
2018-11-13 13:44 ` Jens Axboe
2018-11-13 9:56 ` [PATCH V6 4/5] blk-mq: issue directly with bypass 'false' in blk_mq_sched_insert_requests Jianchao Wang
2018-11-13 9:56 ` [PATCH V6 5/5] blk-mq: replace and kill blk_mq_request_issue_directly Jianchao Wang
4 siblings, 1 reply; 10+ messages in thread
From: Jianchao Wang @ 2018-11-13 9:56 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, linux-block, linux-kernel
When issue request directly and the task is migrated out of the
original cpu where it allocates request, hctx could be ran on
the cpu where it is not mapped.
To fix this,
- insert the request forcibly if BLK_MQ_F_BLOCKING is set.
- check whether the current is mapped to the hctx, if not, insert
forcibly.
- invoke __blk_mq_issue_directly under preemption disabled.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---
block/blk-mq.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 11c52bb..58f15cc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1776,6 +1776,17 @@ static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
blk_status_t ret = BLK_STS_RESOURCE;
int srcu_idx;
+ if (hctx->flags & BLK_MQ_F_BLOCKING) {
+ force = true;
+ goto out;
+ }
+
+ if (unlikely(!cpumask_test_cpu(get_cpu(), hctx->cpumask))) {
+ put_cpu();
+ force = true;
+ goto out;
+ }
+
hctx_lock(hctx, &srcu_idx);
/*
* hctx_lock is needed before checking quiesced flag.
@@ -1808,7 +1819,8 @@ static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
out_unlock:
hctx_unlock(hctx, srcu_idx);
-
+ put_cpu();
+out:
switch (ret) {
case BLK_STS_OK:
break;
--
2.7.4
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH V6 3/5] blk-mq: ensure hctx to be ran on mapped cpu when issue directly
2018-11-13 9:56 ` [PATCH V6 3/5] blk-mq: ensure hctx to be ran on mapped cpu when issue directly Jianchao Wang
@ 2018-11-13 13:44 ` Jens Axboe
2018-11-14 2:15 ` jianchao.wang
0 siblings, 1 reply; 10+ messages in thread
From: Jens Axboe @ 2018-11-13 13:44 UTC (permalink / raw)
To: Jianchao Wang; +Cc: ming.lei, linux-block, linux-kernel
On 11/13/18 2:56 AM, Jianchao Wang wrote:
> When issue request directly and the task is migrated out of the
> original cpu where it allocates request, hctx could be ran on
> the cpu where it is not mapped.
> To fix this,
> - insert the request forcibly if BLK_MQ_F_BLOCKING is set.
> - check whether the current is mapped to the hctx, if not, insert
> forcibly.
> - invoke __blk_mq_issue_directly under preemption disabled.
I'm not too crazy about this one, adding a get/put_cpu() in the hot
path, and a cpumask test. The fact is that most/no drivers care
about strict placement. We always try to do so, if convenient,
since it's faster, but this seems to be doing the opposite.
I'd be more inclined to have a driver flag if it needs guaranteed
placement, using one an ops BLK_MQ_F_STRICT_CPU flag or similar.
What do you think?
--
Jens Axboe
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH V6 3/5] blk-mq: ensure hctx to be ran on mapped cpu when issue directly
2018-11-13 13:44 ` Jens Axboe
@ 2018-11-14 2:15 ` jianchao.wang
2018-11-14 3:02 ` Ming Lei
0 siblings, 1 reply; 10+ messages in thread
From: jianchao.wang @ 2018-11-14 2:15 UTC (permalink / raw)
To: Jens Axboe; +Cc: ming.lei, linux-block, linux-kernel
Hi Jens
Thanks for your kindly response.
On 11/13/18 9:44 PM, Jens Axboe wrote:
> On 11/13/18 2:56 AM, Jianchao Wang wrote:
>> When issue request directly and the task is migrated out of the
>> original cpu where it allocates request, hctx could be ran on
>> the cpu where it is not mapped.
>> To fix this,
>> - insert the request forcibly if BLK_MQ_F_BLOCKING is set.
>> - check whether the current is mapped to the hctx, if not, insert
>> forcibly.
>> - invoke __blk_mq_issue_directly under preemption disabled.
>
> I'm not too crazy about this one, adding a get/put_cpu() in the hot
> path, and a cpumask test. The fact is that most/no drivers care
> about strict placement. We always try to do so, if convenient,
> since it's faster, but this seems to be doing the opposite.
>
> I'd be more inclined to have a driver flag if it needs guaranteed
> placement, using one an ops BLK_MQ_F_STRICT_CPU flag or similar.
>
> What do you think?
>
I'd inclined blk-mq should comply with a unified rule, no matter the
issuing directly path or inserting one. Then blk-mq would have a simpler
model. And also this guarantee could be a little good for drivers,
especially the case where cpu and hw queue mapping is 1:1.
Regarding with hot path, do you concern about the nvme device ?
If so, how about split a standalone path for it ?
Thanks
Jianchao
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH V6 3/5] blk-mq: ensure hctx to be ran on mapped cpu when issue directly
2018-11-14 2:15 ` jianchao.wang
@ 2018-11-14 3:02 ` Ming Lei
2018-11-14 3:38 ` jianchao.wang
0 siblings, 1 reply; 10+ messages in thread
From: Ming Lei @ 2018-11-14 3:02 UTC (permalink / raw)
To: jianchao.wang
Cc: Jens Axboe, Ming Lei, linux-block, Linux Kernel Mailing List
On Wed, Nov 14, 2018 at 10:15 AM jianchao.wang
<jianchao.w.wang@oracle.com> wrote:
>
> Hi Jens
>
> Thanks for your kindly response.
>
> On 11/13/18 9:44 PM, Jens Axboe wrote:
> > On 11/13/18 2:56 AM, Jianchao Wang wrote:
> >> When issue request directly and the task is migrated out of the
> >> original cpu where it allocates request, hctx could be ran on
> >> the cpu where it is not mapped.
> >> To fix this,
> >> - insert the request forcibly if BLK_MQ_F_BLOCKING is set.
> >> - check whether the current is mapped to the hctx, if not, insert
> >> forcibly.
> >> - invoke __blk_mq_issue_directly under preemption disabled.
> >
> > I'm not too crazy about this one, adding a get/put_cpu() in the hot
> > path, and a cpumask test. The fact is that most/no drivers care
> > about strict placement. We always try to do so, if convenient,
> > since it's faster, but this seems to be doing the opposite.
> >
> > I'd be more inclined to have a driver flag if it needs guaranteed
> > placement, using one an ops BLK_MQ_F_STRICT_CPU flag or similar.
> >
> > What do you think?
> >
>
> I'd inclined blk-mq should comply with a unified rule, no matter the
> issuing directly path or inserting one. Then blk-mq would have a simpler
> model. And also this guarantee could be a little good for drivers,
> especially the case where cpu and hw queue mapping is 1:1.
I guess it is quite hard to respect this rule 100%, such as in case of
CPU hotplug.
Thanks,
Ming Lei
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH V6 3/5] blk-mq: ensure hctx to be ran on mapped cpu when issue directly
2018-11-14 3:02 ` Ming Lei
@ 2018-11-14 3:38 ` jianchao.wang
0 siblings, 0 replies; 10+ messages in thread
From: jianchao.wang @ 2018-11-14 3:38 UTC (permalink / raw)
To: Ming Lei; +Cc: Jens Axboe, Ming Lei, linux-block, Linux Kernel Mailing List
On 11/14/18 11:02 AM, Ming Lei wrote:
> On Wed, Nov 14, 2018 at 10:15 AM jianchao.wang
> <jianchao.w.wang@oracle.com> wrote:
>>
>> Hi Jens
>>
>> Thanks for your kindly response.
>>
>> On 11/13/18 9:44 PM, Jens Axboe wrote:
>>> On 11/13/18 2:56 AM, Jianchao Wang wrote:
>>>> When issue request directly and the task is migrated out of the
>>>> original cpu where it allocates request, hctx could be ran on
>>>> the cpu where it is not mapped.
>>>> To fix this,
>>>> - insert the request forcibly if BLK_MQ_F_BLOCKING is set.
>>>> - check whether the current is mapped to the hctx, if not, insert
>>>> forcibly.
>>>> - invoke __blk_mq_issue_directly under preemption disabled.
>>>
>>> I'm not too crazy about this one, adding a get/put_cpu() in the hot
>>> path, and a cpumask test. The fact is that most/no drivers care
>>> about strict placement. We always try to do so, if convenient,
>>> since it's faster, but this seems to be doing the opposite.
>>>
>>> I'd be more inclined to have a driver flag if it needs guaranteed
>>> placement, using one an ops BLK_MQ_F_STRICT_CPU flag or similar.
>>>
>>> What do you think?
>>>
>>
>> I'd inclined blk-mq should comply with a unified rule, no matter the
>> issuing directly path or inserting one. Then blk-mq would have a simpler
>> model. And also this guarantee could be a little good for drivers,
>> especially the case where cpu and hw queue mapping is 1:1.
>
> I guess it is quite hard to respect this rule 100%, such as in case of
> CPU hotplug.
>
Yes, it is indeed the case.
Looks like this patch is contentious.
I will drop this one and post later as a standalone one if necessary.
Thanks
Jianchao
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH V6 4/5] blk-mq: issue directly with bypass 'false' in blk_mq_sched_insert_requests
2018-11-13 9:56 [PATCH V6 0/5] blk-mq: refactor and fix on issue request directly Jianchao Wang
` (2 preceding siblings ...)
2018-11-13 9:56 ` [PATCH V6 3/5] blk-mq: ensure hctx to be ran on mapped cpu when issue directly Jianchao Wang
@ 2018-11-13 9:56 ` Jianchao Wang
2018-11-13 9:56 ` [PATCH V6 5/5] blk-mq: replace and kill blk_mq_request_issue_directly Jianchao Wang
4 siblings, 0 replies; 10+ messages in thread
From: Jianchao Wang @ 2018-11-13 9:56 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, linux-block, linux-kernel
It is not necessary to issue requests directly with bypass 'true'
in blk_mq_sched_insert_requests and then handle the non-issued
requests ourselves. Just set bypass to 'false' and let
blk_mq_try_issue_directly handle them completely.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---
block/blk-mq-sched.c | 8 +++-----
block/blk-mq.c | 13 +++----------
2 files changed, 6 insertions(+), 15 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 66fda19..9af57c8 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -410,12 +410,10 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
* busy in case of 'none' scheduler, and this way may save
* us one extra enqueue & dequeue to sw queue.
*/
- if (!hctx->dispatch_busy && !e && !run_queue_async) {
+ if (!hctx->dispatch_busy && !e && !run_queue_async)
blk_mq_try_issue_list_directly(hctx, list);
- if (list_empty(list))
- return;
- }
- blk_mq_insert_requests(hctx, ctx, list);
+ else
+ blk_mq_insert_requests(hctx, ctx, list);
}
blk_mq_run_hw_queue(hctx, run_queue_async);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 58f15cc..f41a815 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1855,21 +1855,14 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
+ blk_qc_t unused_cookie;
+
while (!list_empty(list)) {
- blk_status_t ret;
struct request *rq = list_first_entry(list, struct request,
queuelist);
list_del_init(&rq->queuelist);
- ret = blk_mq_request_issue_directly(rq);
- if (ret != BLK_STS_OK) {
- if (ret == BLK_STS_RESOURCE ||
- ret == BLK_STS_DEV_RESOURCE) {
- list_add(&rq->queuelist, list);
- break;
- }
- blk_mq_end_request(rq, ret);
- }
+ blk_mq_try_issue_directly(hctx, rq, &unused_cookie, false);
}
}
--
2.7.4
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH V6 5/5] blk-mq: replace and kill blk_mq_request_issue_directly
2018-11-13 9:56 [PATCH V6 0/5] blk-mq: refactor and fix on issue request directly Jianchao Wang
` (3 preceding siblings ...)
2018-11-13 9:56 ` [PATCH V6 4/5] blk-mq: issue directly with bypass 'false' in blk_mq_sched_insert_requests Jianchao Wang
@ 2018-11-13 9:56 ` Jianchao Wang
4 siblings, 0 replies; 10+ messages in thread
From: Jianchao Wang @ 2018-11-13 9:56 UTC (permalink / raw)
To: axboe; +Cc: ming.lei, linux-block, linux-kernel
Replace blk_mq_request_issue_directly with blk_mq_try_issue_directly
in blk_insert_cloned_request and kill it as nobody uses it any more.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
---
block/blk-core.c | 4 +++-
block/blk-mq.c | 9 +--------
block/blk-mq.h | 7 ++++---
3 files changed, 8 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fdc0ad2..e4eedc7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1421,6 +1421,8 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
*/
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
+ blk_qc_t unused_cookie;
+
if (blk_cloned_rq_check_limits(q, rq))
return BLK_STS_IOERR;
@@ -1436,7 +1438,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
- return blk_mq_request_issue_directly(rq);
+ return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused_cookie, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f41a815..b5316c78 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1766,7 +1766,7 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
return ret;
}
-static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
bool bypass)
@@ -1845,13 +1845,6 @@ static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
return ret;
}
-blk_status_t blk_mq_request_issue_directly(struct request *rq)
-{
- blk_qc_t unused_cookie;
-
- return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused_cookie, true);
-}
-
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
diff --git a/block/blk-mq.h b/block/blk-mq.h
index facb6e9..f18c27c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -61,9 +61,10 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
-
-/* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_issue_directly(struct request *rq);
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq,
+ blk_qc_t *cookie,
+ bool bypass);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);
--
2.7.4
^ permalink raw reply related [flat|nested] 10+ messages in thread