linux-nvme.lists.infradead.org archive mirror
* [PATCH v2] nvme: align io queue count with allocated nvme_queue in nvme_probe
@ 2020-04-23  7:59 Weiping Zhang
  2020-04-23 10:24 ` Max Gurtovoy
  0 siblings, 1 reply; 3+ messages in thread
From: Weiping Zhang @ 2020-04-23  7:59 UTC (permalink / raw)
  To: hch, axboe, kbusch, sagi, maxg; +Cc: linux-nvme

Since commit 147b27e4bd0 ("nvme-pci: allocate device queues storage space at
probe"), nvme_alloc_queue no longer allocates struct nvme_queue itself.
If the user raises write_queues/poll_queues above the number of queues
allocated in nvme_probe, nvme_alloc_queue will touch memory out of bounds.
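
To illustrate the overrun, here is a simplified sketch of the pre-patch
pattern (condensed for illustration, not a literal excerpt from pci.c):

	/* nvme_probe(): the array is sized from the parameters as loaded */
	dev->queues = kcalloc_node(1 + num_possible_cpus() +
				   write_queues + poll_queues,
				   sizeof(struct nvme_queue), GFP_KERNEL, node);

	/* nvme_alloc_queue(): on a later reset, qid is derived from the
	 * current parameter values and used as a plain array index, so it
	 * overruns dev->queues[] if write_queues/poll_queues grew meanwhile
	 */
	struct nvme_queue *nvmeq = &dev->queues[qid];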

This patch adds nr_allocated_queue to struct nvme_dev to record how many
queues were allocated in nvme_probe, so the nvme driver will never use more
queues than nr_allocated_queue when the user updates the queue count and
resets the controller.

Since the global module parameters can be changed at runtime, it is not
safe to use these two parameters directly in the following functions:
nvme_dbbuf_dma_alloc
nvme_dbbuf_dma_free
nvme_calc_irq_sets
nvme_setup_io_queues

This patch also adds nr_write_queues and nr_poll_queues to struct nvme_dev
and io_queues_reload to struct nvme_ctrl, which allow the module parameters
to be reloaded per controller on a controller reset. By default the nvme
driver does not reload the module parameters (write/poll_queues) on reset;
users who want to reload them should enable it with
echo 1 > /sys/block/<nvme_disk>/device/io_queues_reload.
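
For example, assuming a controller whose namespace is nvme0n1
(reset_controller and the module parameter files already exist; only
io_queues_reload is added by this patch):

	# opt in to reloading write/poll_queues on the next reset
	echo 1 > /sys/block/nvme0n1/device/io_queues_reload
	# change the global module parameters at runtime
	echo 4 > /sys/module/nvme/parameters/poll_queues
	# reset the controller so the new values take effect
	echo 1 > /sys/block/nvme0n1/device/reset_controller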

With this change, the nvme pci driver lets users change the io queue count
for each type (write, read, poll) within nr_allocated_queue. That is to say,
users who want to change the queue counts dynamically via a controller reset
should set up as many io queues as possible when loading the nvme module,
and then tune the io queue count for each type.
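
A worked example with illustrative numbers: on a machine with 8 possible
CPUs, loading the module with write_queues=8 and poll_queues=8 makes
nvme_probe allocate 8 + 8 + 8 + 1 = 25 nvme_queue entries, so up to 24 io
queues remain available across all later resets. Lowering the parameters
and resetting simply uses fewer of those entries; any combination that
would exceed the allocation is clamped to nr_allocated_queue - 1.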

Signed-off-by: Weiping Zhang <zhangweiping@didiglobal.com>
---
Changes since V1:
 * don't use the module parameters directly in nvme_dbbuf_dma_free,
	nvme_dbbuf_dma_alloc and nvme_calc_irq_sets.
 * add a per-controller sysfs file, io_queues_reload, to enable/disable
	reloading the global module parameters.

 drivers/nvme/host/core.c | 29 +++++++++++++++++++++
 drivers/nvme/host/nvme.h |  1 +
 drivers/nvme/host/pci.c  | 55 +++++++++++++++++++++++-----------------
 3 files changed, 62 insertions(+), 23 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dfb064b4334f..80172192a9d8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3357,6 +3357,34 @@ static ssize_t nvme_sysfs_show_address(struct device *dev,
 }
 static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
 
+static ssize_t nvme_sysfs_io_queues_reload_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		ctrl->io_queues_reload ? 1 : 0);
+}
+
+static ssize_t nvme_sysfs_io_queues_reload_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+	bool val;
+
+	if (kstrtobool(buf, &val))
+		return -EINVAL;
+	ctrl->io_queues_reload = val;
+
+	return count;
+}
+
+static DEVICE_ATTR(io_queues_reload, S_IRUGO | S_IWUSR,
+		nvme_sysfs_io_queues_reload_show,
+		nvme_sysfs_io_queues_reload_store);
+
 static struct attribute *nvme_dev_attrs[] = {
 	&dev_attr_reset_controller.attr,
 	&dev_attr_rescan_controller.attr,
@@ -3374,6 +3402,7 @@ static struct attribute *nvme_dev_attrs[] = {
 	&dev_attr_sqsize.attr,
 	&dev_attr_hostnqn.attr,
 	&dev_attr_hostid.attr,
+	&dev_attr_io_queues_reload.attr,
 	NULL
 };
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f3ab17778349..50b6392b9a33 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -260,6 +260,7 @@ struct nvme_ctrl {
 	struct work_struct fw_act_work;
 	unsigned long events;
 	bool created;
+	bool io_queues_reload;
 
 #ifdef CONFIG_NVME_MULTIPATH
 	/* asymmetric namespace access: */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4e79e412b276..00f7c93d73c9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -89,6 +89,9 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
  */
 struct nvme_dev {
 	struct nvme_queue *queues;
+	int nr_allocated_queue;
+	int nr_write_queues;
+	int nr_poll_queues;
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
 	u32 __iomem *dbs;
@@ -209,25 +212,14 @@ struct nvme_iod {
 	struct scatterlist *sg;
 };
 
-static unsigned int max_io_queues(void)
+static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
 {
-	return num_possible_cpus() + write_queues + poll_queues;
-}
-
-static unsigned int max_queue_count(void)
-{
-	/* IO queues + admin queue */
-	return 1 + max_io_queues();
-}
-
-static inline unsigned int nvme_dbbuf_size(u32 stride)
-{
-	return (max_queue_count() * 8 * stride);
+	return (dev->nr_allocated_queue * 8 * dev->db_stride);
 }
 
 static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
 {
-	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+	unsigned int mem_size = nvme_dbbuf_size(dev);
 
 	if (dev->dbbuf_dbs)
 		return 0;
@@ -252,7 +244,7 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
 
 static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
 {
-	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);
+	unsigned int mem_size = nvme_dbbuf_size(dev);
 
 	if (dev->dbbuf_dbs) {
 		dma_free_coherent(dev->dev, mem_size,
@@ -1991,7 +1983,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
 {
 	struct nvme_dev *dev = affd->priv;
-	unsigned int nr_read_queues;
+	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
 
 	/*
 	 * If there is no interupt available for queues, ensure that
@@ -2007,12 +1999,12 @@ static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
 	if (!nrirqs) {
 		nrirqs = 1;
 		nr_read_queues = 0;
-	} else if (nrirqs == 1 || !write_queues) {
+	} else if (nrirqs == 1 || !nr_write_queues) {
 		nr_read_queues = 0;
-	} else if (write_queues >= nrirqs) {
+	} else if (nr_write_queues >= nrirqs) {
 		nr_read_queues = 1;
 	} else {
-		nr_read_queues = nrirqs - write_queues;
+		nr_read_queues = nrirqs - nr_write_queues;
 	}
 
 	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
@@ -2036,7 +2028,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	 * Poll queues don't need interrupts, but we need at least one IO
 	 * queue left over for non-polled IO.
 	 */
-	this_p_queues = poll_queues;
+	this_p_queues = dev->nr_poll_queues;
 	if (this_p_queues >= nr_io_queues) {
 		this_p_queues = nr_io_queues - 1;
 		irq_queues = 1;
@@ -2073,7 +2065,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;
 
-	nr_io_queues = max_io_queues();
+	/* reload io queue count from module paramters write/poll_queues */
+	if (dev->ctrl.io_queues_reload) {
+		dev->nr_write_queues = write_queues;
+		dev->nr_poll_queues = poll_queues;
+		nr_io_queues = num_possible_cpus() + dev->nr_write_queues +
+				dev->nr_poll_queues;
+		if (nr_io_queues > dev->nr_allocated_queue - 1)
+			nr_io_queues = dev->nr_allocated_queue - 1;
+	} else {
+		nr_io_queues = dev->nr_allocated_queue - 1;
+	}
 
 	/*
 	 * If tags are shared with admin queue (Apple bug), then
@@ -2742,7 +2744,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-	int node, result = -ENOMEM;
+	int node, nr_queue, result = -ENOMEM;
 	struct nvme_dev *dev;
 	unsigned long quirks = id->driver_data;
 	size_t alloc_size;
@@ -2755,11 +2757,18 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!dev)
 		return -ENOMEM;
 
-	dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
+	dev->nr_write_queues = write_queues;
+	dev->nr_poll_queues = poll_queues;
+	/* IO queues + admin queue */
+	nr_queue = dev->nr_write_queues + dev->nr_poll_queues +
+		num_possible_cpus() + 1;
+	dev->queues = kcalloc_node(nr_queue, sizeof(struct nvme_queue),
 					GFP_KERNEL, node);
 	if (!dev->queues)
 		goto free;
 
+	dev->nr_allocated_queue = nr_queue;
+
 	dev->dev = get_device(&pdev->dev);
 	pci_set_drvdata(pdev, dev);
 
-- 
2.18.1


* Re: [PATCH v2] nvme: align io queue count with allocated nvme_queue in nvme_probe
  2020-04-23  7:59 [PATCH v2] nvme: align io queue count with allocated nvme_queue in nvme_probe Weiping Zhang
@ 2020-04-23 10:24 ` Max Gurtovoy
  2020-04-24  4:25   ` Weiping Zhang
  0 siblings, 1 reply; 3+ messages in thread
From: Max Gurtovoy @ 2020-04-23 10:24 UTC (permalink / raw)
  To: Weiping Zhang, hch, axboe, kbusch, sagi; +Cc: linux-nvme


On 4/23/2020 10:59 AM, Weiping Zhang wrote:
> Since the commit 147b27e4bd0 "nvme-pci: allocate device queues storage space at probe"
> nvme_alloc_queue will not alloc struct nvme_queue any more.
> If user change write/poll_queues to larger than the number of
> allocated queue in nvme_probe, nvme_alloc_queue will touch
> the memory out of boundary.
>
> This patch add nr_allocated_queues for struct nvme_dev to record how
> many queues alloctaed in nvme_probe, then nvme driver will not use
> more queues than nr_allocated_queues when user update queue count
> and do a controller reset.
>
> Since global module parameter can be changed at rumtime, so it's not
> safe to use these two parameter directly in the following functions:
> nvme_dbbuf_dma_alloc
> nvme_dbbuf_dma_free
> nvme_calc_irq_sets
> nvme_setup_io_queues
>
> This patch also add nr_write_queues, nr_poll_queues for
> struct nvme_dev and io_queues_reload for struct nvme_ctrl, that allow
> per-controller reload module parmater when reset controller. The nvme
> driver will not reload module parameter(write/poll_queues) by default
> when reset controller. If user want to reload them, they should enable
> it by echo 1 > /sys/block/<nvme_disk>/device/io_queues_reload.
>
> By now, nvme pci driver allow user change io queue count for each
> type(write, read, poll) within nr_allocated_queue, that's to say, if
> user want to change queue dynamically by reset controller, they should
> setup io queues as many as possiable when laod nvme module, and then
> tune io queue count for each type.

typo: laod --> load


> Signed-off-by: Weiping Zhang <zhangweiping@didiglobal.com>
> ---
> Changes since V1:
>   * don't use module parameter nvme_dbbuf_dma_free, nvme_dbbuf_dma_alloc
> 	and nvme_calc_irq_sets.
>   * add per-controller sysfs file io_queues_reload to enable/disable
> 	reload global module parameter.
>
>   drivers/nvme/host/core.c | 29 +++++++++++++++++++++
>   drivers/nvme/host/nvme.h |  1 +
>   drivers/nvme/host/pci.c  | 55 +++++++++++++++++++++++-----------------
>   3 files changed, 62 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index dfb064b4334f..80172192a9d8 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -3357,6 +3357,34 @@ static ssize_t nvme_sysfs_show_address(struct device *dev,
>   }
>   static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
>   
> +static ssize_t nvme_sysfs_io_queues_reload_show(struct device *dev,
> +					 struct device_attribute *attr,
> +					 char *buf)
> +{
> +	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
> +
> +	return snprintf(buf, PAGE_SIZE, "%d\n",
> +		ctrl->io_queues_reload ? 1 : 0);
> +}
> +
> +static ssize_t nvme_sysfs_io_queues_reload_store(struct device *dev,
> +				struct device_attribute *attr, const char *buf,
> +				size_t count)
> +{
> +	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
> +	bool val;
> +
> +	if (kstrtobool(buf, &val))
> +		return -EINVAL;
> +	ctrl->io_queues_reload = val;
> +
> +	return count;
> +}
> +
> +static DEVICE_ATTR(io_queues_reload, S_IRUGO | S_IWUSR,
> +		nvme_sysfs_io_queues_reload_show,
> +		nvme_sysfs_io_queues_reload_store);
> +
>   static struct attribute *nvme_dev_attrs[] = {
>   	&dev_attr_reset_controller.attr,
>   	&dev_attr_rescan_controller.attr,
> @@ -3374,6 +3402,7 @@ static struct attribute *nvme_dev_attrs[] = {
>   	&dev_attr_sqsize.attr,
>   	&dev_attr_hostnqn.attr,
>   	&dev_attr_hostid.attr,
> +	&dev_attr_io_queues_reload.attr,
>   	NULL
>   };

Well, for fabrics controllers it doesn't mean anything.

Maybe we can make it non-visible for fabrics?
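
One way to do that, sketched against the is_visible callback of this
attribute group in core.c (untested; assumes the group uses
nvme_dev_attrs_are_visible() and that checking NVME_F_FABRICS in
ctrl->ops->flags is the right test):

	static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
			struct attribute *a, int n)
	{
		struct device *dev = container_of(kobj, struct device, kobj);
		struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

		/* ... existing delete_controller/address checks ... */

		/* hide the reload knob on fabrics controllers */
		if (a == &dev_attr_io_queues_reload.attr &&
		    (ctrl->ops->flags & NVME_F_FABRICS))
			return 0;

		return a->mode;
	}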


> [...]


* Re: [PATCH v2] nvme: align io queue count with allocated nvme_queue in nvme_probe
  2020-04-23 10:24 ` Max Gurtovoy
@ 2020-04-24  4:25   ` Weiping Zhang
  0 siblings, 0 replies; 3+ messages in thread
From: Weiping Zhang @ 2020-04-24  4:25 UTC (permalink / raw)
  To: Max Gurtovoy
  Cc: Jens Axboe, sagi, Weiping Zhang, linux-nvme, Christoph Hellwig,
	Keith Busch

On Thu, Apr 23, 2020 at 6:25 PM Max Gurtovoy <maxg@mellanox.com> wrote:
>
>
> On 4/23/2020 10:59 AM, Weiping Zhang wrote:
> > [...]
> > By now, nvme pci driver allow user change io queue count for each
> > type(write, read, poll) within nr_allocated_queue, that's to say, if
> > user want to change queue dynamically by reset controller, they should
> > setup io queues as many as possiable when laod nvme module, and then
> > tune io queue count for each type.
>
> typo: laod --> load
>
OK, fix it in V3.
>
> > [...]
> >   static struct attribute *nvme_dev_attrs[] = {
> >       &dev_attr_reset_controller.attr,
> >       &dev_attr_rescan_controller.attr,
> > @@ -3374,6 +3402,7 @@ static struct attribute *nvme_dev_attrs[] = {
> >       &dev_attr_sqsize.attr,
> >       &dev_attr_hostnqn.attr,
> >       &dev_attr_hostid.attr,
> > +     &dev_attr_io_queues_reload.attr,
> >       NULL
> >   };
>
> Well, for fabrics controllers it doesn't mean anything.
>
> Maybe we can make it non-visible for fabrics?
>

Makes sense, will fix in v3.

Thanks
