* [PATCH v4 1/2] virtio-blk: support polling I/O
From: Suwan Kim @ 2022-04-04 9:28 UTC (permalink / raw)
To: mst, jasowang, stefanha, pbonzini, mgurtovoy, dongli.zhang
Cc: virtualization, linux-block, Suwan Kim
This patch adds polling I/O support to the virtio-blk driver. Polling
is enabled by the module parameter "num_poll_queues", which sets the
number of dedicated polling queues for virtio-blk. This improves
polling I/O throughput and latency.
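As a sketch of how the parameter might be set (illustrative commands,
not part of the patch):

    # load the driver with 2 dedicated poll queues
    modprobe virtio_blk num_poll_queues=2

    # the parameter is 0644, so it can also be written via sysfs;
    # it takes effect for devices probed afterwards
    echo 2 > /sys/module/virtio_blk/parameters/num_poll_queues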
Currently the virtio-blk driver has neither a poll function nor poll
queues, so it operates in an interrupt-driven manner even when the
upper layer issues polled I/O.
virtio-blk polling is built on top of the block layer's batched
completion. virtblk_poll() queues completed requests on
io_comp_batch->req_list, and virtblk_complete_batch() later unmaps
their data and ends the requests in a batch.
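Roughly, the completion path looks like this (a sketch of the call
flow, not verbatim code):

    io_uring (hipri) -> blk_mq_poll() -> mq_ops->poll() = virtblk_poll()
        -> blk_mq_add_to_batch()        /* collect into iob->req_list */
    ...
    iob->complete() = virtblk_complete_batch()
        -> virtblk_unmap_data() + virtblk_cleanup_cmd() per request
        -> blk_mq_end_request_batch()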
virtio-blk reads the number of poll queues from the module parameter
"num_poll_queues". If the VM is configured with "num-queues=N" (QEMU
property) and "num_poll_queues=M" (module parameter), the driver
allocates N virtqueues in virtio_blk->vqs[] and uses indexes
[0..(N-M-1)] as default queues and [(N-M)..(N-1)] as poll queues.
Unlike the default queues, the poll queues have no callback function.
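As a concrete example with illustrative numbers, "num-queues=8" and
"num_poll_queues=2" give:

    vblk->vqs[0..5] : default queues, callback virtblk_done, named "req.N"
    vblk->vqs[6..7] : poll queues, no callback, named "req_poll.N"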
Regarding the HW-SW queue mapping, the default queues use the existing
mapping method that considers the MSI irq vector. But the poll queues
have no irq, so they use the regular blk-mq CPU mapping.
To verify the improvement, I ran a fio polling I/O performance test
with the io_uring engine and the options below.
(io_uring, hipri, randread, direct=1, bs=512, iodepth=64, numjobs=N)
The VM was set up with 4 vcpus and 4 virtio-blk queues: 2 default
queues and 2 poll queues.
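The full fio command line is not included here; one matching the
listed options would look roughly like this (filename and runtime are
my assumptions):

    fio --name=polltest --ioengine=io_uring --hipri --rw=randread \
        --direct=1 --bs=512 --iodepth=64 --numjobs=N \
        --filename=/dev/vda --time_based --runtime=60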
As a result, IOPS and average latency improved by about 10%.
Test result:
- Fio io_uring poll without virtio-blk poll support
-- numjobs=1 : IOPS = 339K, avg latency = 188.33us
-- numjobs=2 : IOPS = 367K, avg latency = 347.33us
-- numjobs=4 : IOPS = 383K, avg latency = 682.06us
- Fio io_uring poll with virtio-blk poll support
-- numjobs=1 : IOPS = 385K, avg latency = 165.94us
-- numjobs=2 : IOPS = 408K, avg latency = 313.28us
-- numjobs=4 : IOPS = 424K, avg latency = 613.05us
Signed-off-by: Suwan Kim <suwan.kim027@gmail.com>
---
drivers/block/virtio_blk.c | 112 +++++++++++++++++++++++++++++++++++--
1 file changed, 108 insertions(+), 4 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 8c415be86732..c2d955da0006 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -37,6 +37,10 @@ MODULE_PARM_DESC(num_request_queues,
"0 for no limit. "
"Values > nr_cpu_ids truncated to nr_cpu_ids.");
+static unsigned int num_poll_queues;
+module_param(num_poll_queues, uint, 0644);
+MODULE_PARM_DESC(num_poll_queues, "The number of dedicated virtqueues for polling I/O");
+
static int major;
static DEFINE_IDA(vd_index_ida);
@@ -81,6 +85,7 @@ struct virtio_blk {
/* num of vqs */
int num_vqs;
+ int io_queues[HCTX_MAX_TYPES];
struct virtio_blk_vq *vqs;
};
@@ -548,6 +553,7 @@ static int init_vq(struct virtio_blk *vblk)
const char **names;
struct virtqueue **vqs;
unsigned short num_vqs;
+ unsigned int num_poll_vqs;
struct virtio_device *vdev = vblk->vdev;
struct irq_affinity desc = { 0, };
@@ -556,6 +562,7 @@ static int init_vq(struct virtio_blk *vblk)
&num_vqs);
if (err)
num_vqs = 1;
+
if (!err && !num_vqs) {
dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
return -EINVAL;
@@ -565,6 +572,18 @@ static int init_vq(struct virtio_blk *vblk)
min_not_zero(num_request_queues, nr_cpu_ids),
num_vqs);
+ num_poll_vqs = min_t(unsigned int, num_poll_queues, num_vqs - 1);
+
+ memset(vblk->io_queues, 0, sizeof(int) * HCTX_MAX_TYPES);
+ vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
+ vblk->io_queues[HCTX_TYPE_READ] = 0;
+ vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
+
+ dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
+ vblk->io_queues[HCTX_TYPE_DEFAULT],
+ vblk->io_queues[HCTX_TYPE_READ],
+ vblk->io_queues[HCTX_TYPE_POLL]);
+
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
@@ -578,8 +597,13 @@ static int init_vq(struct virtio_blk *vblk)
}
for (i = 0; i < num_vqs; i++) {
- callbacks[i] = virtblk_done;
- snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
+ if (i < num_vqs - num_poll_vqs) {
+ callbacks[i] = virtblk_done;
+ snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
+ } else {
+ callbacks[i] = NULL;
+ snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
+ }
names[i] = vblk->vqs[i].name;
}
@@ -728,16 +752,93 @@ static const struct attribute_group *virtblk_attr_groups[] = {
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
+ int i, qoff;
+
+ for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ map->nr_queues = vblk->io_queues[i];
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+
+ if (map->nr_queues == 0)
+ continue;
+
+ /*
+ * Regular queues have interrupts and hence CPU affinity is
+ * defined by the core virtio code, but polling queues have
+ * no interrupts so we let the block layer assign CPU affinity.
+ */
+ if (i == HCTX_TYPE_DEFAULT)
+ blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ else
+ blk_mq_map_queues(&set->map[i]);
+ }
+
+ return 0;
+}
+
+static void virtblk_complete_batch(struct io_comp_batch *iob)
+{
+ struct request *req;
+ struct virtblk_req *vbr;
+
+ rq_list_for_each(&iob->req_list, req) {
+ vbr = blk_mq_rq_to_pdu(req);
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
+static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = hctx->driver_data;
+ struct virtblk_req *vbr;
+ bool req_done = false;
+ unsigned long flags;
+ unsigned int len;
+ int found = 0;
+
+ spin_lock_irqsave(&vq->lock, flags);
+
+ while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
+ struct request *req = blk_mq_rq_from_pdu(vbr);
- return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
- vblk->vdev, 0);
+ found++;
+ if (!blk_mq_add_to_batch(req, iob, vbr->status,
+ virtblk_complete_batch))
+ blk_mq_complete_request(req);
+ req_done = true;
+ }
+
+ if (req_done)
+ blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
+
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ return found;
+}
+
+static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct virtio_blk *vblk = data;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx_idx];
+
+ WARN_ON(vblk->tag_set.tags[hctx_idx] != hctx->tags);
+ hctx->driver_data = vq;
+ return 0;
}
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.commit_rqs = virtio_commit_rqs,
+ .init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
.map_queues = virtblk_map_queues,
+ .poll = virtblk_poll,
};
static unsigned int virtblk_queue_depth;
@@ -816,6 +917,9 @@ static int virtblk_probe(struct virtio_device *vdev)
sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
vblk->tag_set.driver_data = vblk;
vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+ vblk->tag_set.nr_maps = 1;
+ if (vblk->io_queues[HCTX_TYPE_POLL])
+ vblk->tag_set.nr_maps = 3;
err = blk_mq_alloc_tag_set(&vblk->tag_set);
if (err)
--
2.26.3
* [PATCH v4 2/2] virtio-blk: support mq_ops->queue_rqs()
From: Suwan Kim @ 2022-04-04 9:28 UTC (permalink / raw)
To: mst, jasowang, stefanha, pbonzini, mgurtovoy, dongli.zhang
Cc: virtualization, linux-block, Suwan Kim
This patch implements the mq_ops->queue_rqs() hook, which enables
batch submission of requests to the virtio-blk driver. It also helps
polling I/O because polling uses the block layer's batched completion;
batch submission in queue_rqs() can boost polling performance.
queue_rqs() iterates over plug->mq_list, collecting requests that
belong to the same HW queue until it encounters a request from another
HW queue or reaches the end of the list. Then virtio-blk adds the
collected requests to the virtqueue and kicks the virtqueue to submit
them.
If an error occurs, the failing request is put on a requeue_list and
passed back through the ordinary block layer path.
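As an illustration with hypothetical requests, a plugged list that
spans two HW queues is submitted as two batches:

    plug->mq_list: rq0(hctx0) -> rq1(hctx0) -> rq2(hctx1) -> rq3(hctx1)

    batch 1: rq0, rq1 -> virtblk_add_req_batch() on vqs[0], one kick
    batch 2: rq2, rq3 -> virtblk_add_req_batch() on vqs[1], one kick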
For verification, I ran a fio test with the options below.
(io_uring, randread, direct=1, bs=4K, iodepth=64, numjobs=N)
The VM was set up with 4 vcpus and 2 virtio-blk queues, and I ran the
test 5 times. It shows about a 2% improvement.
                          | numjobs=2 | numjobs=4
-----------------------------------------------------------
fio without queue_rqs()  | 291K IOPS | 238K IOPS
-----------------------------------------------------------
fio with queue_rqs()     | 295K IOPS | 243K IOPS
For polling I/O performance, I also ran a fio test as below.
(io_uring, hipri, randread, direct=1, bs=512, iodepth=64, numjobs=4)
The VM was set up with 4 vcpus and 2 poll queues. It shows about a 2%
improvement in polling I/O.
                             | IOPS | avg latency
-----------------------------------------------------------
fio poll without queue_rqs() | 424K | 613.05 usec
-----------------------------------------------------------
fio poll with queue_rqs()    | 435K | 601.01 usec
Signed-off-by: Suwan Kim <suwan.kim027@gmail.com>
---
drivers/block/virtio_blk.c | 110 +++++++++++++++++++++++++++++++++----
1 file changed, 99 insertions(+), 11 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c2d955da0006..03e4455c23e3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -92,6 +92,7 @@ struct virtio_blk {
struct virtblk_req {
struct virtio_blk_outhdr out_hdr;
u8 status;
+ int sg_num;
struct sg_table sg_table;
struct scatterlist sg[];
};
@@ -311,18 +312,13 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
virtqueue_notify(vq->vq);
}
-static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+ struct virtio_blk *vblk,
+ struct request *req,
+ struct virtblk_req *vbr)
{
- struct virtio_blk *vblk = hctx->queue->queuedata;
- struct request *req = bd->rq;
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
- unsigned long flags;
- int num;
- int qid = hctx->queue_num;
- bool notify = false;
blk_status_t status;
- int err;
+ int num;
status = virtblk_setup_cmd(vblk->vdev, req, vbr);
if (unlikely(status))
@@ -335,9 +331,30 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
virtblk_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
+ vbr->sg_num = num;
+
+ return BLK_STS_OK;
+}
+
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct request *req = bd->rq;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ unsigned long flags;
+ int qid = hctx->queue_num;
+ bool notify = false;
+ blk_status_t status;
+ int err;
+
+ status = virtblk_prep_rq(hctx, vblk, req, vbr);
+ if (unlikely(status))
+ return status;
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr,
+ vbr->sg_table.sgl, vbr->sg_num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -367,6 +384,76 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
+static bool virtblk_prep_rq_batch(struct request *req)
+{
+ struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ req->mq_hctx->tags->rqs[req->tag] = req;
+
+ return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+}
+
+static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+ struct request **rqlist,
+ struct request **requeue_list)
+{
+ unsigned long flags;
+ int err;
+ bool kick;
+
+ spin_lock_irqsave(&vq->lock, flags);
+
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ err = virtblk_add_req(vq->vq, vbr,
+ vbr->sg_table.sgl, vbr->sg_num);
+ if (err) {
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
+ rq_list_add(requeue_list, req);
+ }
+ }
+
+ kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irqrestore(&vq->lock, flags);
+
+ return kick;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+ struct request *req, *next, *prev = NULL;
+ struct request *requeue_list = NULL;
+
+ rq_list_for_each_safe(rqlist, req, next) {
+ struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+ bool kick;
+
+ if (!virtblk_prep_rq_batch(req)) {
+ rq_list_move(rqlist, &requeue_list, req, prev);
+ req = prev;
+ if (!req)
+ continue;
+ }
+
+ if (!next || req->mq_hctx != next->mq_hctx) {
+ req->rq_next = NULL;
+ kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+ if (kick)
+ virtqueue_notify(vq->vq);
+
+ *rqlist = next;
+ prev = NULL;
+ } else
+ prev = req;
+ }
+
+ *rqlist = requeue_list;
+}
+
/* return id (s/n) string for *disk to *id_str
*/
static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -834,6 +921,7 @@ static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
+ .queue_rqs = virtio_queue_rqs,
.commit_rqs = virtio_commit_rqs,
.init_hctx = virtblk_init_hctx,
.complete = virtblk_request_done,
--
2.26.3