From: Bob Liu <bob.liu@oracle.com>
To: xen-devel@lists.xen.org
Cc: linux-kernel@vger.kernel.org, roger.pau@citrix.com,
konrad.wilk@oracle.com, felipe.franciosi@citrix.com,
axboe@fb.com, avanzini.arianna@gmail.com,
rafal.mielniczuk@citrix.com, jonathan.davies@citrix.com,
david.vrabel@citrix.com, Bob Liu <bob.liu@oracle.com>
Subject: [PATCH v4 05/10] xen/blkfront: negotiate number of queues/rings to be used with backend
Date: Mon, 2 Nov 2015 12:21:41 +0800 [thread overview]
Message-ID: <1446438106-20171-6-git-send-email-bob.liu@oracle.com> (raw)
In-Reply-To: <1446438106-20171-1-git-send-email-bob.liu@oracle.com>
The number of hardware queues for xen/blkfront is set by the parameter
'max_queues' (default 4), while the maximum value supported by xen/blkback is
advertised through xenstore ("multi-queue-max-queues").
The negotiated number is the smaller of the two and is written back to xenstore
as "multi-queue-num-queues"; blkback needs to read this negotiated number.
Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
drivers/block/xen-blkfront.c | 166 +++++++++++++++++++++++++++++++------------
1 file changed, 120 insertions(+), 46 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8cc5995..23096d7 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,6 +98,10 @@ static unsigned int xen_blkif_max_segments = 32;
module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
+static unsigned int xen_blkif_max_queues = 4;
+module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
+
/*
* Maximum order of pages to be used for the shared ring between front and
* backend, 4KB page granularity is used.
@@ -113,6 +117,7 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
* characters are enough. Define to 20 to keep consist with backend.
*/
#define RINGREF_NAME_LEN (20)
+#define QUEUE_NAME_LEN (12)
/*
* Per-ring info.
@@ -695,7 +700,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
memset(&info->tag_set, 0, sizeof(info->tag_set));
info->tag_set.ops = &blkfront_mq_ops;
- info->tag_set.nr_hw_queues = 1;
+ info->tag_set.nr_hw_queues = info->nr_rings;
info->tag_set.queue_depth = BLK_RING_SIZE(info);
info->tag_set.numa_node = NUMA_NO_NODE;
info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
@@ -1352,6 +1357,51 @@ fail:
return err;
}
+static int write_per_ring_nodes(struct xenbus_transaction xbt,
+ struct blkfront_ring_info *rinfo, const char *dir)
+{
+ int err, i;
+ const char *message = NULL;
+ struct blkfront_info *info = rinfo->dev_info;
+
+ if (info->nr_ring_pages == 1) {
+ err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
+ if (err) {
+ message = "writing ring-ref";
+ goto abort_transaction;
+ }
+ pr_info("%s: write ring-ref:%d\n", dir, rinfo->ring_ref[0]);
+ } else {
+ for (i = 0; i < info->nr_ring_pages; i++) {
+ char ring_ref_name[RINGREF_NAME_LEN];
+
+ snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+ err = xenbus_printf(xbt, dir, ring_ref_name,
+ "%u", rinfo->ring_ref[i]);
+ if (err) {
+ message = "writing ring-ref";
+ goto abort_transaction;
+ }
+ pr_info("%s: write ring-ref:%d\n", dir, rinfo->ring_ref[i]);
+ }
+ }
+
+ err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+ pr_info("%s: write event-channel:%d\n", dir, rinfo->evtchn);
+
+ return 0;
+
+abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ if (message)
+ xenbus_dev_fatal(info->xbdev, err, "%s", message);
+
+ return err;
+}
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
@@ -1362,7 +1412,6 @@ static int talk_to_blkback(struct xenbus_device *dev,
int err, i;
unsigned int max_page_order = 0;
unsigned int ring_page_order = 0;
- struct blkfront_ring_info *rinfo;
err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"max-ring-page-order", "%u", &max_page_order);
@@ -1374,7 +1423,8 @@ static int talk_to_blkback(struct xenbus_device *dev,
}
for (i = 0; i < info->nr_rings; i++) {
- rinfo = &info->rinfo[i];
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
+
/* Create shared ring, alloc event channel. */
err = setup_blkring(dev, rinfo);
if (err)
@@ -1388,45 +1438,51 @@ again:
goto destroy_blkring;
}
- if (info->nr_rings == 1) {
- rinfo = &info->rinfo[0];
- if (info->nr_ring_pages == 1) {
- err = xenbus_printf(xbt, dev->nodename,
- "ring-ref", "%u", rinfo->ring_ref[0]);
- if (err) {
- message = "writing ring-ref";
- goto abort_transaction;
- }
- } else {
- err = xenbus_printf(xbt, dev->nodename,
- "ring-page-order", "%u", ring_page_order);
- if (err) {
- message = "writing ring-page-order";
- goto abort_transaction;
- }
+ if (info->nr_ring_pages > 1) {
+ err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
+ ring_page_order);
+ if (err) {
+ message = "writing ring-page-order";
+ goto abort_transaction;
+ }
+ }
- for (i = 0; i < info->nr_ring_pages; i++) {
- char ring_ref_name[RINGREF_NAME_LEN];
+ /* We already got the number of queues/rings in _probe */
+ if (info->nr_rings == 1) {
+ err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
+ if (err)
+ goto destroy_blkring;
+ } else {
+ char *path;
+ size_t pathsize;
- snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
- err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
- "%u", rinfo->ring_ref[i]);
- if (err) {
- message = "writing ring-ref";
- goto abort_transaction;
- }
- }
- }
- err = xenbus_printf(xbt, dev->nodename,
- "event-channel", "%u", rinfo->evtchn);
+ err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
+ info->nr_rings);
if (err) {
- message = "writing event-channel";
+ message = "writing multi-queue-num-queues";
goto abort_transaction;
}
- } else {
- /* Not supported at this stage */
- goto abort_transaction;
+
+ pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
+ path = kmalloc(pathsize, GFP_KERNEL);
+ if (!path) {
+ err = -ENOMEM;
+ message = "ENOMEM while writing ring references";
+ goto abort_transaction;
+ }
+
+ for (i = 0; i < info->nr_rings; i++) {
+ memset(path, 0, pathsize);
+ snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
+ err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
+ if (err) {
+ kfree(path);
+ goto destroy_blkring;
+ }
+ }
+ kfree(path);
}
+
err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
XEN_IO_PROTO_ABI_NATIVE);
if (err) {
@@ -1449,7 +1505,7 @@ again:
for (i = 0; i < info->nr_rings; i++) {
int j;
- rinfo = &info->rinfo[i];
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
for (j = 0; j < BLK_RING_SIZE(info); j++)
rinfo->shadow[j].req.u.rw.id = j + 1;
@@ -1480,6 +1536,7 @@ static int blkfront_probe(struct xenbus_device *dev,
{
int err, vdevice, r_index;
struct blkfront_info *info;
+ unsigned int backend_max_queues = 0;
/* FIXME: Use dynamic device id if this is not set. */
err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1529,7 +1586,25 @@ static int blkfront_probe(struct xenbus_device *dev,
return -ENOMEM;
}
- info->nr_rings = 1;
+ mutex_init(&info->mutex);
+ spin_lock_init(&info->dev_lock);
+ info->xbdev = dev;
+ info->vdevice = vdevice;
+ INIT_LIST_HEAD(&info->grants);
+ info->connected = BLKIF_STATE_DISCONNECTED;
+
+ /* Check if backend supports multiple queues */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "multi-queue-max-queues", "%u", &backend_max_queues);
+ if (err < 0)
+ backend_max_queues = 1;
+
+ info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
+ if (info->nr_rings <= 0)
+ info->nr_rings = 1;
+ pr_debug("Number of queues to be used:%u, though backend supports max-queues:%u\n",
+ info->nr_rings, backend_max_queues);
+
info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
@@ -1547,14 +1622,6 @@ static int blkfront_probe(struct xenbus_device *dev,
spin_lock_init(&rinfo->ring_lock);
}
- mutex_init(&info->mutex);
- spin_lock_init(&info->dev_lock);
- info->xbdev = dev;
- info->vdevice = vdevice;
- INIT_LIST_HEAD(&info->grants);
- info->persistent_gnts_c = 0;
- info->connected = BLKIF_STATE_DISCONNECTED;
-
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2216,6 +2283,7 @@ static struct xenbus_driver blkfront_driver = {
static int __init xlblk_init(void)
{
int ret;
+ int nr_cpus = num_online_cpus();
if (!xen_domain())
return -ENODEV;
@@ -2226,6 +2294,12 @@ static int __init xlblk_init(void)
xen_blkif_max_ring_order = 0;
}
+ if (xen_blkif_max_queues > nr_cpus) {
+ pr_info("Invalid max_queues (%d), will use default max: %d.\n",
+ xen_blkif_max_queues, nr_cpus);
+ xen_blkif_max_queues = nr_cpus;
+ }
+
if (!xen_has_pv_disk_devices())
return -ENODEV;
--
1.8.3.1
next prev parent reply other threads:[~2015-11-02 4:22 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-11-02 4:21 [PATCH v4 00/10] xen-block: multi hardware-queues/rings support Bob Liu
2015-11-02 4:21 ` [PATCH v4 01/10] xen/blkif: document blkif multi-queue/ring extension Bob Liu
2015-11-02 4:21 ` [PATCH v4 02/10] xen/blkfront: separate per ring information out of device info Bob Liu
2015-11-02 4:49 ` kbuild test robot
2015-11-02 5:33 ` Bob Liu
2015-11-02 4:21 ` [PATCH v4 03/10] xen/blkfront: pseudo support for multi hardware queues/rings Bob Liu
[not found] ` <20151103194436.GE28527@char.us.oracle.com>
2015-11-04 1:01 ` Bob Liu
2015-11-02 4:21 ` [PATCH v4 04/10] xen/blkfront: split per device io_lock Bob Liu
[not found] ` <20151103200902.GF28527@char.us.oracle.com>
2015-11-04 1:07 ` Bob Liu
2015-11-04 1:51 ` Konrad Rzeszutek Wilk
2015-11-02 4:21 ` Bob Liu [this message]
[not found] ` <20151103204029.GH28527@char.us.oracle.com>
2015-11-04 1:11 ` [PATCH v4 05/10] xen/blkfront: negotiate number of queues/rings to be used with backend Bob Liu
2015-11-04 1:53 ` Konrad Rzeszutek Wilk
2015-11-02 4:21 ` [PATCH v4 06/10] xen/blkback: separate ring information out of struct xen_blkif Bob Liu
2015-11-02 4:21 ` [PATCH v4 07/10] xen/blkback: pseudo support for multi hardware queues/rings Bob Liu
2015-11-05 2:30 ` Konrad Rzeszutek Wilk
2015-11-05 3:02 ` Bob Liu
2015-11-05 3:24 ` Konrad Rzeszutek Wilk
2015-11-02 4:21 ` [PATCH v4 08/10] xen/blkback: get the number of hardware queues/rings from blkfront Bob Liu
2015-11-05 2:37 ` Konrad Rzeszutek Wilk
2015-11-02 4:21 ` [PATCH v4 09/10] xen/blkfront: make persistent grants per-queue Bob Liu
2015-11-05 2:39 ` Konrad Rzeszutek Wilk
2015-11-02 4:21 ` [PATCH v4 10/10] xen/blkback: make pool of persistent grants and free pages per-queue Bob Liu
2015-11-05 2:43 ` Konrad Rzeszutek Wilk
2015-11-05 2:46 ` Bob Liu
2015-11-05 19:50 ` Konrad Rzeszutek Wilk
2015-11-02 11:19 ` [Xen-devel] [PATCH v4 00/10] xen-block: multi hardware-queues/rings support Julien Grall
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1446438106-20171-6-git-send-email-bob.liu@oracle.com \
--to=bob.liu@oracle.com \
--cc=avanzini.arianna@gmail.com \
--cc=axboe@fb.com \
--cc=david.vrabel@citrix.com \
--cc=felipe.franciosi@citrix.com \
--cc=jonathan.davies@citrix.com \
--cc=konrad.wilk@oracle.com \
--cc=linux-kernel@vger.kernel.org \
--cc=rafal.mielniczuk@citrix.com \
--cc=roger.pau@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).