All of lore.kernel.org
 help / color / mirror / Atom feed
From: Christoph Hellwig <hch@lst.de>
To: Keith Busch <kbusch@kernel.org>
Cc: Sagi Grimberg <sagi@grimberg.me>,
	Chaitanya Kulkarni <kch@nvidia.com>,
	Gerd Bayer <gbayer@linux.ibm.com>,
	asahi@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org
Subject: [PATCH 11/12] nvme-pci: split the initial probe from the rest path
Date: Tue,  8 Nov 2022 16:02:51 +0100	[thread overview]
Message-ID: <20221108150252.2123727-12-hch@lst.de> (raw)
In-Reply-To: <20221108150252.2123727-1-hch@lst.de>

nvme_reset_work is a little fragile as it needs to handle both resetting
a live controller and initializing one during probe.  Split out the initial
probe and open code it in nvme_probe and leave nvme_reset_work to just do
the live controller reset.

This fixes a recently introduced bug where nvme_dev_disable causes a NULL
pointer dereference in blk_mq_quiesce_tagset because the tagset pointer
is not set when the reset state is entered directly from the new state.
The separate probe code can skip the reset state and probe directly, which
fixes this.

To make sure the system isn't single threaded on enabling nvme
controllers, set the PROBE_PREFER_ASYNCHRONOUS flag in the device_driver
structure so that the driver core probes in parallel.

Fixes: 98d81f0df70c ("nvme: use blk_mq_[un]quiesce_tagset")
Reported-by: Gerd Bayer <gbayer@linux.ibm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/pci.c | 139 ++++++++++++++++++++++++----------------
 1 file changed, 83 insertions(+), 56 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1b3c96a4b7c90..1c8c70767cb8a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2821,15 +2821,7 @@ static void nvme_reset_work(struct work_struct *work)
 	result = nvme_pci_enable(dev);
 	if (result)
 		goto out_unlock;
-
-	if (!dev->ctrl.admin_q) {
-		result = nvme_pci_alloc_admin_tag_set(dev);
-		if (result)
-			goto out_unlock;
-	} else {
-		nvme_start_admin_queue(&dev->ctrl);
-	}
-
+	nvme_start_admin_queue(&dev->ctrl);
 	mutex_unlock(&dev->shutdown_lock);
 
 	/*
@@ -2854,9 +2846,6 @@ static void nvme_reset_work(struct work_struct *work)
 		 */
 		memset(dev->dbbuf_dbs, 0, nvme_dbbuf_size(dev));
 		memset(dev->dbbuf_eis, 0, nvme_dbbuf_size(dev));
-	} else {
-		if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)
-			nvme_dbbuf_dma_alloc(dev);
 	}
 
 	if (dev->ctrl.hmpre) {
@@ -2869,37 +2858,23 @@ static void nvme_reset_work(struct work_struct *work)
 	if (result)
 		goto out;
 
-	if (dev->ctrl.tagset) {
-		/*
-		 * This is a controller reset and we already have a tagset.
-		 * Freeze and update the number of I/O queues as thos might have
-		 * changed.  If there are no I/O queues left after this reset,
-		 * keep the controller around but remove all namespaces.
-		 */
-		if (dev->online_queues > 1) {
-			nvme_start_queues(&dev->ctrl);
-			nvme_wait_freeze(&dev->ctrl);
-			nvme_pci_update_nr_queues(dev);
-			nvme_dbbuf_set(dev);
-			nvme_unfreeze(&dev->ctrl);
-		} else {
-			dev_warn(dev->ctrl.device, "IO queues lost\n");
-			nvme_mark_namespaces_dead(&dev->ctrl);
-			nvme_start_queues(&dev->ctrl);
-			nvme_remove_namespaces(&dev->ctrl);
-			nvme_free_tagset(dev);
-		}
+	/*
+	 * Freeze and update the number of I/O queues as thos might have
+	 * changed.  If there are no I/O queues left after this reset, keep the
+	 * controller around but remove all namespaces.
+	 */
+	if (dev->online_queues > 1) {
+		nvme_start_queues(&dev->ctrl);
+		nvme_wait_freeze(&dev->ctrl);
+		nvme_pci_update_nr_queues(dev);
+		nvme_dbbuf_set(dev);
+		nvme_unfreeze(&dev->ctrl);
 	} else {
-		/*
-		 * First probe.  Still allow the controller to show up even if
-		 * there are no namespaces.
-		 */
-		if (dev->online_queues > 1) {
-			nvme_pci_alloc_tag_set(dev);
-			nvme_dbbuf_set(dev);
-		} else {
-			dev_warn(dev->ctrl.device, "IO queues not created\n");
-		}
+		dev_warn(dev->ctrl.device, "IO queues lost\n");
+		nvme_mark_namespaces_dead(&dev->ctrl);
+		nvme_start_queues(&dev->ctrl);
+		nvme_remove_namespaces(&dev->ctrl);
+		nvme_free_tagset(dev);
 	}
 
 	/*
@@ -3055,15 +3030,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 	return 0;
 }
 
-static void nvme_async_probe(void *data, async_cookie_t cookie)
-{
-	struct nvme_dev *dev = data;
-
-	flush_work(&dev->ctrl.reset_work);
-	flush_work(&dev->ctrl.scan_work);
-	nvme_put_ctrl(&dev->ctrl);
-}
-
 static struct nvme_dev *nvme_pci_alloc_ctrl(struct pci_dev *pdev,
 		const struct pci_device_id *id)
 {
@@ -3155,12 +3121,72 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_release_prp_pools;
 
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
+	result = nvme_pci_enable(dev);
+	if (result)
+		goto out_release_iod_mempool;
+
+	result = nvme_pci_alloc_admin_tag_set(dev);
+	if (result)
+		goto out_disable;
+
+	/*
+	 * Mark the controller as connecting before sending admin commands to
+	 * allow the timeout handler to do the right thing.
+	 */
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller CONNECTING\n");
+		result = -EBUSY;
+		goto out_disable;
+	}
+
+	result = nvme_init_ctrl_finish(&dev->ctrl, false);
+	if (result)
+		goto out_disable;
+
+	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)
+		nvme_dbbuf_dma_alloc(dev);
+
+	if (dev->ctrl.hmpre) {
+		result = nvme_setup_host_mem(dev);
+		if (result < 0)
+			goto out_disable;
+	}
+
+	result = nvme_setup_io_queues(dev);
+	if (result)
+		goto out_disable;
+
+	if (dev->online_queues > 1) {
+		nvme_pci_alloc_tag_set(dev);
+		nvme_dbbuf_set(dev);
+	} else {
+		dev_warn(dev->ctrl.device, "IO queues not created\n");
+	}
+
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller live state\n");
+		result = -ENODEV;
+		goto out_disable;
+	}
+
 	pci_set_drvdata(pdev, dev);
 
-	nvme_reset_ctrl(&dev->ctrl);
-	async_schedule(nvme_async_probe, dev);
+	nvme_start_ctrl(&dev->ctrl);
+	nvme_put_ctrl(&dev->ctrl);
 	return 0;
 
+out_disable:
+	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+	nvme_dev_disable(dev, true);
+	nvme_free_host_mem(dev);
+	nvme_dev_remove_admin(dev);
+	nvme_dbbuf_dma_free(dev);
+	nvme_free_queues(dev, 0);
+out_release_iod_mempool:
+	mempool_destroy(dev->iod_mempool);
 out_release_prp_pools:
 	nvme_release_prp_pools(dev);
 out_dev_unmap:
@@ -3556,11 +3582,12 @@ static struct pci_driver nvme_driver = {
 	.probe		= nvme_probe,
 	.remove		= nvme_remove,
 	.shutdown	= nvme_shutdown,
-#ifdef CONFIG_PM_SLEEP
 	.driver		= {
-		.pm	= &nvme_dev_pm_ops,
-	},
+		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
+#ifdef CONFIG_PM_SLEEP
+		.pm		= &nvme_dev_pm_ops,
 #endif
+	},
 	.sriov_configure = pci_sriov_configure_simple,
 	.err_handler	= &nvme_err_handler,
 };
-- 
2.30.2


WARNING: multiple messages have this Message-ID (diff)
From: Christoph Hellwig <hch@lst.de>
To: Keith Busch <kbusch@kernel.org>
Cc: Sagi Grimberg <sagi@grimberg.me>,
	Chaitanya Kulkarni <kch@nvidia.com>,
	Gerd Bayer <gbayer@linux.ibm.com>,
	asahi@lists.linux.dev, linux-arm-kernel@lists.infradead.org,
	linux-nvme@lists.infradead.org
Subject: [PATCH 11/12] nvme-pci: split the initial probe from the rest path
Date: Tue,  8 Nov 2022 16:02:51 +0100	[thread overview]
Message-ID: <20221108150252.2123727-12-hch@lst.de> (raw)
In-Reply-To: <20221108150252.2123727-1-hch@lst.de>

nvme_reset_work is a little fragile as it needs to handle both resetting
a live controller and initializing one during probe.  Split out the initial
probe and open code it in nvme_probe and leave nvme_reset_work to just do
the live controller reset.

This fixes a recently introduced bug where nvme_dev_disable causes a NULL
pointer dereference in blk_mq_quiesce_tagset because the tagset pointer
is not set when the reset state is entered directly from the new state.
The separate probe code can skip the reset state and probe directly, which
fixes this.

To make sure the system isn't single threaded on enabling nvme
controllers, set the PROBE_PREFER_ASYNCHRONOUS flag in the device_driver
structure so that the driver core probes in parallel.

Fixes: 98d81f0df70c ("nvme: use blk_mq_[un]quiesce_tagset")
Reported-by: Gerd Bayer <gbayer@linux.ibm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/pci.c | 139 ++++++++++++++++++++++++----------------
 1 file changed, 83 insertions(+), 56 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1b3c96a4b7c90..1c8c70767cb8a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2821,15 +2821,7 @@ static void nvme_reset_work(struct work_struct *work)
 	result = nvme_pci_enable(dev);
 	if (result)
 		goto out_unlock;
-
-	if (!dev->ctrl.admin_q) {
-		result = nvme_pci_alloc_admin_tag_set(dev);
-		if (result)
-			goto out_unlock;
-	} else {
-		nvme_start_admin_queue(&dev->ctrl);
-	}
-
+	nvme_start_admin_queue(&dev->ctrl);
 	mutex_unlock(&dev->shutdown_lock);
 
 	/*
@@ -2854,9 +2846,6 @@ static void nvme_reset_work(struct work_struct *work)
 		 */
 		memset(dev->dbbuf_dbs, 0, nvme_dbbuf_size(dev));
 		memset(dev->dbbuf_eis, 0, nvme_dbbuf_size(dev));
-	} else {
-		if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)
-			nvme_dbbuf_dma_alloc(dev);
 	}
 
 	if (dev->ctrl.hmpre) {
@@ -2869,37 +2858,23 @@ static void nvme_reset_work(struct work_struct *work)
 	if (result)
 		goto out;
 
-	if (dev->ctrl.tagset) {
-		/*
-		 * This is a controller reset and we already have a tagset.
-		 * Freeze and update the number of I/O queues as thos might have
-		 * changed.  If there are no I/O queues left after this reset,
-		 * keep the controller around but remove all namespaces.
-		 */
-		if (dev->online_queues > 1) {
-			nvme_start_queues(&dev->ctrl);
-			nvme_wait_freeze(&dev->ctrl);
-			nvme_pci_update_nr_queues(dev);
-			nvme_dbbuf_set(dev);
-			nvme_unfreeze(&dev->ctrl);
-		} else {
-			dev_warn(dev->ctrl.device, "IO queues lost\n");
-			nvme_mark_namespaces_dead(&dev->ctrl);
-			nvme_start_queues(&dev->ctrl);
-			nvme_remove_namespaces(&dev->ctrl);
-			nvme_free_tagset(dev);
-		}
+	/*
+	 * Freeze and update the number of I/O queues as thos might have
+	 * changed.  If there are no I/O queues left after this reset, keep the
+	 * controller around but remove all namespaces.
+	 */
+	if (dev->online_queues > 1) {
+		nvme_start_queues(&dev->ctrl);
+		nvme_wait_freeze(&dev->ctrl);
+		nvme_pci_update_nr_queues(dev);
+		nvme_dbbuf_set(dev);
+		nvme_unfreeze(&dev->ctrl);
 	} else {
-		/*
-		 * First probe.  Still allow the controller to show up even if
-		 * there are no namespaces.
-		 */
-		if (dev->online_queues > 1) {
-			nvme_pci_alloc_tag_set(dev);
-			nvme_dbbuf_set(dev);
-		} else {
-			dev_warn(dev->ctrl.device, "IO queues not created\n");
-		}
+		dev_warn(dev->ctrl.device, "IO queues lost\n");
+		nvme_mark_namespaces_dead(&dev->ctrl);
+		nvme_start_queues(&dev->ctrl);
+		nvme_remove_namespaces(&dev->ctrl);
+		nvme_free_tagset(dev);
 	}
 
 	/*
@@ -3055,15 +3030,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 	return 0;
 }
 
-static void nvme_async_probe(void *data, async_cookie_t cookie)
-{
-	struct nvme_dev *dev = data;
-
-	flush_work(&dev->ctrl.reset_work);
-	flush_work(&dev->ctrl.scan_work);
-	nvme_put_ctrl(&dev->ctrl);
-}
-
 static struct nvme_dev *nvme_pci_alloc_ctrl(struct pci_dev *pdev,
 		const struct pci_device_id *id)
 {
@@ -3155,12 +3121,72 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_release_prp_pools;
 
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
+	result = nvme_pci_enable(dev);
+	if (result)
+		goto out_release_iod_mempool;
+
+	result = nvme_pci_alloc_admin_tag_set(dev);
+	if (result)
+		goto out_disable;
+
+	/*
+	 * Mark the controller as connecting before sending admin commands to
+	 * allow the timeout handler to do the right thing.
+	 */
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller CONNECTING\n");
+		result = -EBUSY;
+		goto out_disable;
+	}
+
+	result = nvme_init_ctrl_finish(&dev->ctrl, false);
+	if (result)
+		goto out_disable;
+
+	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)
+		nvme_dbbuf_dma_alloc(dev);
+
+	if (dev->ctrl.hmpre) {
+		result = nvme_setup_host_mem(dev);
+		if (result < 0)
+			goto out_disable;
+	}
+
+	result = nvme_setup_io_queues(dev);
+	if (result)
+		goto out_disable;
+
+	if (dev->online_queues > 1) {
+		nvme_pci_alloc_tag_set(dev);
+		nvme_dbbuf_set(dev);
+	} else {
+		dev_warn(dev->ctrl.device, "IO queues not created\n");
+	}
+
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller live state\n");
+		result = -ENODEV;
+		goto out_disable;
+	}
+
 	pci_set_drvdata(pdev, dev);
 
-	nvme_reset_ctrl(&dev->ctrl);
-	async_schedule(nvme_async_probe, dev);
+	nvme_start_ctrl(&dev->ctrl);
+	nvme_put_ctrl(&dev->ctrl);
 	return 0;
 
+out_disable:
+	nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+	nvme_dev_disable(dev, true);
+	nvme_free_host_mem(dev);
+	nvme_dev_remove_admin(dev);
+	nvme_dbbuf_dma_free(dev);
+	nvme_free_queues(dev, 0);
+out_release_iod_mempool:
+	mempool_destroy(dev->iod_mempool);
 out_release_prp_pools:
 	nvme_release_prp_pools(dev);
 out_dev_unmap:
@@ -3556,11 +3582,12 @@ static struct pci_driver nvme_driver = {
 	.probe		= nvme_probe,
 	.remove		= nvme_remove,
 	.shutdown	= nvme_shutdown,
-#ifdef CONFIG_PM_SLEEP
 	.driver		= {
-		.pm	= &nvme_dev_pm_ops,
-	},
+		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
+#ifdef CONFIG_PM_SLEEP
+		.pm		= &nvme_dev_pm_ops,
 #endif
+	},
 	.sriov_configure = pci_sriov_configure_simple,
 	.err_handler	= &nvme_err_handler,
 };
-- 
2.30.2


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

  parent reply	other threads:[~2022-11-08 15:03 UTC|newest]

Thread overview: 80+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-11-08 15:02 RFC: nvme-pci: split the probe and reset handlers Christoph Hellwig
2022-11-08 15:02 ` Christoph Hellwig
2022-11-08 15:02 ` [PATCH 01/12] nvme-pci: don't call nvme_init_ctrl_finish from nvme_passthru_end Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  2:55   ` Sagi Grimberg
2022-11-09  2:55     ` Sagi Grimberg
2022-11-09  6:26     ` Christoph Hellwig
2022-11-09  6:26       ` Christoph Hellwig
2022-11-08 15:02 ` [PATCH 02/12] nvme: move OPAL setup from PCIe to core Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  2:55   ` Sagi Grimberg
2022-11-09  2:55     ` Sagi Grimberg
2022-11-09 20:44   ` Keith Busch
2022-11-09 20:44     ` Keith Busch
2022-11-09 23:22     ` Chaitanya Kulkarni
2022-11-09 23:22       ` Chaitanya Kulkarni
2022-11-13 16:15       ` Christoph Hellwig
2022-11-13 16:15         ` Christoph Hellwig
2022-11-08 15:02 ` [PATCH 03/12] nvme: simplify transport specific device attribute handling Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  2:57   ` Sagi Grimberg
2022-11-09  2:57     ` Sagi Grimberg
2022-11-08 15:02 ` [PATCH 04/12] nvme-pci: put the admin queue in nvme_dev_remove_admin Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  2:58   ` Sagi Grimberg
2022-11-09  2:58     ` Sagi Grimberg
2022-11-09  6:28     ` Christoph Hellwig
2022-11-09  6:28       ` Christoph Hellwig
2022-11-08 15:02 ` [PATCH 05/12] nvme-pci: move more teardown work to nvme_remove Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  3:00   ` Sagi Grimberg
2022-11-09  3:00     ` Sagi Grimberg
2022-11-08 15:02 ` [PATCH 06/12] nvme-pci: factor the iod mempool creation into a helper Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  3:00   ` Sagi Grimberg
2022-11-09  3:00     ` Sagi Grimberg
2022-11-08 15:02 ` [PATCH 07/12] nvme-pci: factor out a nvme_pci_alloc_ctrl helper Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  3:03   ` Sagi Grimberg
2022-11-09  3:03     ` Sagi Grimberg
2022-11-09  6:28     ` Christoph Hellwig
2022-11-09  6:28       ` Christoph Hellwig
2022-11-08 15:02 ` [PATCH 08/12] nvme-pci: set constant paramters in nvme_pci_alloc_ctrl Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  3:03   ` Sagi Grimberg
2022-11-09  3:03     ` Sagi Grimberg
2022-11-08 15:02 ` [PATCH 09/12] nvme-pci: call nvme_pci_configure_admin_queue from nvme_pci_enable Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  3:04   ` Sagi Grimberg
2022-11-09  3:04     ` Sagi Grimberg
2022-11-08 15:02 ` [PATCH 10/12] nvme-pci: split nvme_dbbuf_dma_alloc Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  3:05   ` Sagi Grimberg
2022-11-09  3:05     ` Sagi Grimberg
2022-11-08 15:02 ` Christoph Hellwig [this message]
2022-11-08 15:02   ` [PATCH 11/12] nvme-pci: split the initial probe from the rest path Christoph Hellwig
2022-11-09  3:14   ` Sagi Grimberg
2022-11-09  3:14     ` Sagi Grimberg
2022-11-09  6:31     ` Christoph Hellwig
2022-11-09  6:31       ` Christoph Hellwig
2022-11-09 17:00       ` Keith Busch
2022-11-09 17:00         ` Keith Busch
2022-11-09 15:18   ` Gerd Bayer
2022-11-09 15:18     ` Gerd Bayer
2022-11-09 15:51   ` Keith Busch
2022-11-09 15:51     ` Keith Busch
2022-11-09 15:56   ` Keith Busch
2022-11-09 15:56     ` Keith Busch
2022-11-10  3:17   ` Chao Leng
2022-11-10  3:17     ` Chao Leng
2022-11-13 16:19     ` Christoph Hellwig
2022-11-13 16:19       ` Christoph Hellwig
2022-11-08 15:02 ` [PATCH 12/12] nvme-pci: don't unbind the driver on reset failure Christoph Hellwig
2022-11-08 15:02   ` Christoph Hellwig
2022-11-09  3:15   ` Sagi Grimberg
2022-11-09  3:15     ` Sagi Grimberg
2022-11-09 17:10   ` Keith Busch
2022-11-09 17:10     ` Keith Busch
2022-11-09 17:12 ` RFC: nvme-pci: split the probe and reset handlers Keith Busch
2022-11-09 17:12   ` Keith Busch

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221108150252.2123727-12-hch@lst.de \
    --to=hch@lst.de \
    --cc=asahi@lists.linux.dev \
    --cc=gbayer@linux.ibm.com \
    --cc=kbusch@kernel.org \
    --cc=kch@nvidia.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=sagi@grimberg.me \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.