From: Christoph Hellwig <hch@lst.de>
To: Sagi Grimberg <sagi@grimberg.me>
Cc: linux-nvme@lists.infradead.org, Christoph Hellwig <hch@lst.de>,
	Keith Busch <keith.busch@intel.com>,
	linux-block@vger.kernel.org, Jens Axboe <axboe@kernel.dk>
Subject: Re: [PATCH 4/5] nvme-tcp: support separate queue maps for read and write
Date: Tue, 11 Dec 2018 14:41:35 +0100	[thread overview]
Message-ID: <20181211134135.GD20614@lst.de> (raw)
In-Reply-To: <20181211104936.25333-5-sagi@grimberg.me>

On Tue, Dec 11, 2018 at 02:49:34AM -0800, Sagi Grimberg wrote:
> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
> ---
>  drivers/nvme/host/tcp.c | 53 ++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 47 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 15543358e245..5c0ba99fb105 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -1215,7 +1215,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
>  	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
>  	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
>  	struct linger sol = { .l_onoff = 1, .l_linger = 0 };
> -	int ret, opt, rcv_pdu_size;
> +	int ret, opt, rcv_pdu_size, n;
>  
>  	queue->ctrl = ctrl;
>  	INIT_LIST_HEAD(&queue->send_list);
> @@ -1271,7 +1271,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
>  	}
>  
>  	queue->sock->sk->sk_allocation = GFP_ATOMIC;
> -	queue->io_cpu = (qid == 0) ? 0 : qid - 1;
> +	n = (qid ? qid - 1 : 0) % num_online_cpus();
> +	queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
>  	queue->request = NULL;
>  	queue->data_remaining = 0;
>  	queue->ddgst_remaining = 0;
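
For what it's worth, the cpumask_next_wrap() form picks the n-th online
CPU rather than assuming that CPUs 0 through qid - 1 are all online, so
the pinning keeps working with a sparse online mask.
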
> @@ -1433,6 +1434,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
>  		set->driver_data = ctrl;
>  		set->nr_hw_queues = nctrl->queue_count - 1;
>  		set->timeout = NVME_IO_TIMEOUT;
> +		set->nr_maps = 2 /* default + read */;
>  	}
>  
>  	ret = blk_mq_alloc_tag_set(set);
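
Setting nr_maps = 2 makes blk-mq maintain two cpu-to-queue maps for
this tag set (HCTX_TYPE_DEFAULT and HCTX_TYPE_READ), which is what
nvme_tcp_map_queues() below then fills in.
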
> @@ -1527,7 +1529,12 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
>  
>  static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
>  {
> -	return min(ctrl->queue_count - 1, num_online_cpus());
> +	unsigned int nr_io_queues;
> +
> +	nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
> +	nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
> +
> +	return nr_io_queues;
>  }
>  
>  static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
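
As a concrete example of the new accounting: with opts->nr_io_queues = 8
and opts->nr_write_queues = 4 on an 8-CPU host this requests
min(8, 8) + min(4, 8) = 12 I/O queues, whereas the old code capped the
total at num_online_cpus().
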
> @@ -2052,6 +2059,38 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
>  	return BLK_STS_OK;
>  }
>  
> +static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
> +{
> +	struct nvme_tcp_ctrl *ctrl = set->driver_data;
> +	struct blk_mq_queue_map *map;
> +
> +	if (ctrl->ctrl.opts->nr_write_queues) {
> +		/* separate read/write queues */
> +		map = &set->map[HCTX_TYPE_DEFAULT];
> +		map->queue_offset = 0;
> +		map->nr_queues = ctrl->ctrl.opts->nr_write_queues;
> +		blk_mq_map_queues(map);

Shouldn't this use nr_io_queues?  As written the DEFAULT map is sized
by nr_write_queues while the READ map below gets nr_io_queues.

> +		map = &set->map[HCTX_TYPE_READ];
> +		map->nr_queues = ctrl->ctrl.opts->nr_io_queues;
> +		map->queue_offset = ctrl->ctrl.opts->nr_write_queues;
> +		blk_mq_map_queues(map);
> +	} else {
> +		/* mixed read/write queues */
> +		map = &set->map[HCTX_TYPE_DEFAULT];
> +		map->queue_offset = 0;
> +		map->nr_queues = ctrl->ctrl.opts->nr_io_queues;
> +		blk_mq_map_queues(map);
> +
> +		map = &set->map[HCTX_TYPE_READ];
> +		map->queue_offset = 0;
> +		map->nr_queues = ctrl->ctrl.opts->nr_io_queues;
> +		blk_mq_map_queues(map);

Also, I find the reused local map variable a little odd and not helpful
for readability.  What about something like:

static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;

	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
	set->map[HCTX_TYPE_DEFAULT].nr_queues = ctrl->ctrl.opts->nr_io_queues;
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);

	if (ctrl->ctrl.opts->nr_write_queues) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_READ].queue_offset =
				ctrl->ctrl.opts->nr_io_queues;
		set->map[HCTX_TYPE_READ].nr_queues =
				ctrl->ctrl.opts->nr_write_queues;
	} else {
		/* mixed read/write queues */
		set->map[HCTX_TYPE_READ].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
				ctrl->ctrl.opts->nr_io_queues;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
	return 0;
}
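
To make the resulting layout concrete, here is a small userspace model
(not kernel code: the fixed 8-CPU array, the local struct that only
mirrors the queue_offset/nr_queues/mq_map fields of struct
blk_mq_queue_map, and the plain round-robin spread are all simplifying
assumptions; the real blk_mq_map_queues() is topology-aware) of how
queue_offset and nr_queues partition the tag set's hardware contexts
between the two maps:

#include <stdio.h>

#define NR_CPUS 8

struct queue_map {
	unsigned int queue_offset;	/* first hctx owned by this map */
	unsigned int nr_queues;		/* number of hctxs it owns */
	unsigned int mq_map[NR_CPUS];	/* cpu -> hctx index */
};

/* crude stand-in for blk_mq_map_queues(): plain round-robin spread */
static void map_queues(struct queue_map *map)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		map->mq_map[cpu] = map->queue_offset + cpu % map->nr_queues;
}

int main(void)
{
	unsigned int nr_io_queues = 4, nr_write_queues = 2;

	/* the separate read/write case from the if-branch above */
	struct queue_map def = {
		.queue_offset = 0,
		.nr_queues = nr_io_queues,
	};
	struct queue_map read = {
		.queue_offset = nr_io_queues,
		.nr_queues = nr_write_queues,
	};

	map_queues(&def);
	map_queues(&read);

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u: default hctx %u, read hctx %u\n",
		       cpu, def.mq_map[cpu], read.mq_map[cpu]);
	return 0;
}

With these numbers the default map covers hctxs 0-3 and the read map
hctxs 4-5, so the two maps never share a hardware context when
nr_write_queues is set, and nr_hw_queues has to cover the sum, which is
exactly what the nvme_tcp_nr_io_queues() change provides.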

Thread overview:

2018-12-11 10:49 [PATCH 0/5] implement nvmf read/write queue maps Sagi Grimberg
2018-12-11 10:49 ` [PATCH 1/5] blk-mq-rdma: pass in queue map to blk_mq_rdma_map_queues Sagi Grimberg
2018-12-11 13:34   ` Christoph Hellwig
2018-12-11 10:49 ` [PATCH 2/5] nvme-fabrics: add missing nvmf_ctrl_options documentation Sagi Grimberg
2018-12-11 13:35   ` Christoph Hellwig
2018-12-11 10:49 ` [PATCH 3/5] nvme-fabrics: allow user to set nr_write_queues for separate queue maps Sagi Grimberg
2018-12-11 13:35   ` Christoph Hellwig
2018-12-11 10:49 ` [PATCH 4/5] nvme-tcp: support separate queue maps for read and write Sagi Grimberg
2018-12-11 13:41   ` Christoph Hellwig [this message]
2018-12-11 23:11     ` Sagi Grimberg
2018-12-11 10:49 ` [PATCH 5/5] nvme-rdma: support read/write queue separation Sagi Grimberg
2018-12-11 13:42   ` Christoph Hellwig
2018-12-11 10:49 ` [PATCH nvme-cli 6/5] fabrics: pass in nr_write_queues Sagi Grimberg
2018-12-11 19:30   ` Keith Busch
2018-12-11 23:34     ` Sagi Grimberg
2018-12-11 13:28 ` [PATCH 0/5] implement nvmf read/write queue maps Christoph Hellwig
