* [PATCH 0/5] nvme-pci: iod optimisations
@ 2022-07-28 22:11 Keith Busch
  2022-07-28 22:11 ` [PATCH 1/5] nvme-pci: remove nvme_queue from nvme_iod Keith Busch
                   ` (4 more replies)
  0 siblings, 5 replies; 13+ messages in thread
From: Keith Busch @ 2022-07-28 22:11 UTC (permalink / raw)
  To: linux-nvme; +Cc: hch, sagi, Keith Busch

From: Keith Busch <kbusch@kernel.org>

This series adjusts the struct nvme_iod fields for optimal sizing. The
end result on a 64-bit arch removes 24 bytes, shrinking the structure
from 152 down to 128 bytes. We typically allocate many thousands of
these, and the cache-aligned size is a nice bonus.
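
For reference, a rough accounting of where the 24 bytes come from on a
64-bit arch (a sketch only; it assumes 8-byte pointers and dma_addr_t,
4-byte int, and 1-byte bool/s8, with the embedded nvme_request and
nvme_command structures unchanged):

	before: nvmeq(8) + use_sgl+pad(4) + aborted(4) + npages(4)
	        + nents(4) + first_dma(8) + dma_len+pad(8)
	        + meta_dma(8) + sg(8)                       = 56 bytes
	after:  use_sgl(1) + aborted(1) + npages(1) + nents(1)
	        + dma_len(4) + first_dma(8) + meta_dma(8)
	        + sg(8)                                     = 32 bytes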

Keith Busch (5):
  nvme-pci: remove nvme_queue from nvme_iod
  nvme-pci: iod's 'aborted' is a bool
  nvme-pci: iod npages fits in s8
  nvme-pci: iod nents fits in s8
  nvme-pci: move iod dma_len to better pack

 drivers/nvme/host/pci.c | 40 +++++++++++++++++++---------------------
 1 file changed, 19 insertions(+), 21 deletions(-)

-- 
2.30.2




* [PATCH 1/5] nvme-pci: remove nvme_queue from nvme_iod
  2022-07-28 22:11 [PATCH 0/5] nvme-pci: iod optimisations Keith Busch
@ 2022-07-28 22:11 ` Keith Busch
  2022-07-28 22:11 ` [PATCH 2/5] nvme-pci: iod's 'aborted' is a bool Keith Busch
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Keith Busch @ 2022-07-28 22:11 UTC (permalink / raw)
  To: linux-nvme; +Cc: hch, sagi, Keith Busch

From: Keith Busch <kbusch@kernel.org>

We can get the nvme_queue from the request's hctx just as easily, so
save some space by removing the duplicate pointer to the same
structure.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 drivers/nvme/host/pci.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 644664098ae7..138d408f7306 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -226,7 +226,6 @@ struct nvme_queue {
 struct nvme_iod {
 	struct nvme_request req;
 	struct nvme_command cmd;
-	struct nvme_queue *nvmeq;
 	bool use_sgl;
 	int aborted;
 	int npages;		/* In the PRP list. 0 means small pool in use */
@@ -431,11 +430,6 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set,
 {
 	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
-	struct nvme_queue *nvmeq = &dev->queues[queue_idx];
-
-	BUG_ON(!nvmeq);
-	iod->nvmeq = nvmeq;
 
 	nvme_req(req)->ctrl = &dev->ctrl;
 	nvme_req(req)->cmd = &iod->cmd;
@@ -529,7 +523,7 @@ static void **nvme_pci_iod_list(struct request *req)
 
 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	int nseg = blk_rq_nr_phys_segments(req);
 	unsigned int avg_seg_size;
 
@@ -537,7 +531,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 
 	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
 		return false;
-	if (!iod->nvmeq->qid)
+	if (!nvmeq->qid)
 		return false;
 	if (!sgl_threshold || avg_seg_size < sgl_threshold)
 		return false;
@@ -843,6 +837,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	int nr_mapped;
 
 	if (blk_rq_nr_phys_segments(req) == 1) {
+		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 		struct bio_vec bv = req_bvec(req);
 
 		if (!is_pci_p2pdma_page(bv.bv_page)) {
@@ -850,7 +845,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 				return nvme_setup_prp_simple(dev, req,
 							     &cmnd->rw, &bv);
 
-			if (iod->nvmeq->qid && sgl_threshold &&
+			if (nvmeq->qid && sgl_threshold &&
 			    nvme_ctrl_sgl_supported(&dev->ctrl))
 				return nvme_setup_sgl_simple(dev, req,
 							     &cmnd->rw, &bv);
@@ -1030,12 +1025,16 @@ static void nvme_queue_rqs(struct request **rqlist)
 
 static __always_inline void nvme_pci_unmap_rq(struct request *req)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_dev *dev = iod->nvmeq->dev;
+	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+	struct nvme_dev *dev = nvmeq->dev;
+
+	if (blk_integrity_rq(req)) {
+		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-	if (blk_integrity_rq(req))
 		dma_unmap_page(dev->dev, iod->meta_dma,
 			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+	}
+
 	if (blk_rq_nr_phys_segments(req))
 		nvme_unmap_data(dev, req);
 }
@@ -1283,8 +1282,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 
 static void abort_endio(struct request *req, blk_status_t error)
 {
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_queue *nvmeq = iod->nvmeq;
+	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 
 	dev_warn(nvmeq->dev->ctrl.device,
 		 "Abort status: 0x%x", nvme_req(req)->status);
@@ -1346,7 +1344,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
 static enum blk_eh_timer_return nvme_timeout(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_queue *nvmeq = iod->nvmeq;
+	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	struct nvme_dev *dev = nvmeq->dev;
 	struct request *abort_req;
 	struct nvme_command cmd = { };
-- 
2.30.2




* [PATCH 2/5] nvme-pci: iod's 'aborted' is a bool
  2022-07-28 22:11 [PATCH 0/5] nvme-pci: iod optimisations Keith Busch
  2022-07-28 22:11 ` [PATCH 1/5] nvme-pci: remove nvme_queue from nvme_iod Keith Busch
@ 2022-07-28 22:11 ` Keith Busch
  2022-07-28 23:13   ` Chaitanya Kulkarni
  2022-07-28 22:11 ` [PATCH 3/5] nvme-pci: iod npages fits in s8 Keith Busch
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 13+ messages in thread
From: Keith Busch @ 2022-07-28 22:11 UTC (permalink / raw)
  To: linux-nvme; +Cc: hch, sagi, Keith Busch

From: Keith Busch <kbusch@kernel.org>

It's only true or false, so make this a bool to reflect that and save
some space in nvme_iod.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 drivers/nvme/host/pci.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 138d408f7306..a8ae043e33de 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -227,7 +227,7 @@ struct nvme_iod {
 	struct nvme_request req;
 	struct nvme_command cmd;
 	bool use_sgl;
-	int aborted;
+	bool aborted;
 	int npages;		/* In the PRP list. 0 means small pool in use */
 	int nents;		/* Used in scatterlist */
 	dma_addr_t first_dma;
@@ -904,7 +904,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	blk_status_t ret;
 
-	iod->aborted = 0;
+	iod->aborted = false;
 	iod->npages = -1;
 	iod->nents = 0;
 
@@ -1425,7 +1425,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 		atomic_inc(&dev->ctrl.abort_limit);
 		return BLK_EH_RESET_TIMER;
 	}
-	iod->aborted = 1;
+	iod->aborted = true;
 
 	cmd.abort.opcode = nvme_admin_abort_cmd;
 	cmd.abort.cid = nvme_cid(req);
-- 
2.30.2




* [PATCH 3/5] nvme-pci: iod npages fits in s8
  2022-07-28 22:11 [PATCH 0/5] nvme-pci: iod optimisations Keith Busch
  2022-07-28 22:11 ` [PATCH 1/5] nvme-pci: remove nvme_queue from nvme_iod Keith Busch
  2022-07-28 22:11 ` [PATCH 2/5] nvme-pci: iod's 'aborted' is a bool Keith Busch
@ 2022-07-28 22:11 ` Keith Busch
  2022-07-28 23:14   ` Chaitanya Kulkarni
  2022-07-29 13:19   ` Christoph Hellwig
  2022-07-28 22:11 ` [PATCH 4/5] nvme-pci: iod nents " Keith Busch
  2022-07-28 22:11 ` [PATCH 5/5] nvme-pci: move iod dma_len to better pack Keith Busch
  4 siblings, 2 replies; 13+ messages in thread
From: Keith Busch @ 2022-07-28 22:11 UTC (permalink / raw)
  To: linux-nvme; +Cc: hch, sagi, Keith Busch

From: Keith Busch <kbusch@kernel.org>

The largest allowed transfer is 4MB, which needs at most 1025 PRP
entries. Each PRP entry is 8 bytes, so the maximum number of 4k nvme
pages needed for the iod_list is 3, which fits in an 's8'.
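
The arithmetic, for reference (assuming 4k pages and standard PRP list
chaining, where the last entry of each full list page points to the
next page):

	4MB / 4k page size         = 1024 data pages
	+ 1 for an unaligned head  = up to 1025 PRP entries
	- 1 carried in PRP1 itself = 1024 entries in the list
	page 1: 511 entries + chain pointer
	page 2: 511 entries + chain pointer
	page 3: the remaining 2 entries  => 3 pages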

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 drivers/nvme/host/pci.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a8ae043e33de..9e5bbf4e3e07 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -228,7 +228,7 @@ struct nvme_iod {
 	struct nvme_command cmd;
 	bool use_sgl;
 	bool aborted;
-	int npages;		/* In the PRP list. 0 means small pool in use */
+	s8 npages;		/* In the PRP list. 0 means small pool in use */
 	int nents;		/* Used in scatterlist */
 	dma_addr_t first_dma;
 	unsigned int dma_len;	/* length of single DMA segment mapping */
-- 
2.30.2




* [PATCH 4/5] nvme-pci: iod nents fits in s8
  2022-07-28 22:11 [PATCH 0/5] nvme-pci: iod optimisations Keith Busch
                   ` (2 preceding siblings ...)
  2022-07-28 22:11 ` [PATCH 3/5] nvme-pci: iod npages fits in s8 Keith Busch
@ 2022-07-28 22:11 ` Keith Busch
  2022-07-28 23:15   ` Chaitanya Kulkarni
  2022-07-29 13:21   ` Christoph Hellwig
  2022-07-28 22:11 ` [PATCH 5/5] nvme-pci: move iod dma_len to better pack Keith Busch
  4 siblings, 2 replies; 13+ messages in thread
From: Keith Busch @ 2022-07-28 22:11 UTC (permalink / raw)
  To: linux-nvme; +Cc: hch, sagi, Keith Busch

From: Keith Busch <kbusch@kernel.org>

The maximum number of 'nents' allowed by the queue limit is 127, which
fits in an 's8'.
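
For reference, the 127 comes from the driver's own segment cap; a
sketch, assuming the definition in pci.c is unchanged:

	#define NVME_MAX_SEGS	127

That is exactly S8_MAX, so the fit is tight.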

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 drivers/nvme/host/pci.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9e5bbf4e3e07..546de3c2000b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -229,7 +229,7 @@ struct nvme_iod {
 	bool use_sgl;
 	bool aborted;
 	s8 npages;		/* In the PRP list. 0 means small pool in use */
-	int nents;		/* Used in scatterlist */
+	s8 nents;		/* Used in scatterlist */
 	dma_addr_t first_dma;
 	unsigned int dma_len;	/* length of single DMA segment mapping */
 	dma_addr_t meta_dma;
-- 
2.30.2




* [PATCH 5/5] nvme-pci: move iod dma_len to better pack
  2022-07-28 22:11 [PATCH 0/5] nvme-pci: iod optimisations Keith Busch
                   ` (3 preceding siblings ...)
  2022-07-28 22:11 ` [PATCH 4/5] nvme-pci: iod nents " Keith Busch
@ 2022-07-28 22:11 ` Keith Busch
  2022-07-28 23:15   ` Chaitanya Kulkarni
  4 siblings, 1 reply; 13+ messages in thread
From: Keith Busch @ 2022-07-28 22:11 UTC (permalink / raw)
  To: linux-nvme; +Cc: hch, sagi, Keith Busch

From: Keith Busch <kbusch@kernel.org>

The 32-bit 'dma_len' field currently sits between the 64-bit first_dma
and meta_dma fields, forcing 4 bytes of padding. Moving it above the
dma_addr_t fields packs it against the preceding byte-sized members
without creating a gap.
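
For reference, the resulting layout on a 64-bit arch (a sketch; offsets
assume 1-byte bool/s8 and 8-byte dma_addr_t):

	bool use_sgl;		/* offset 0 */
	bool aborted;		/* offset 1 */
	s8 npages;		/* offset 2 */
	s8 nents;		/* offset 3 */
	unsigned int dma_len;	/* offset 4: completes the 8-byte slot */
	dma_addr_t first_dma;	/* offset 8 */
	dma_addr_t meta_dma;	/* offset 16 */
	struct scatterlist *sg;	/* offset 24 */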

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 drivers/nvme/host/pci.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 546de3c2000b..66f7204ab905 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -230,8 +230,8 @@ struct nvme_iod {
 	bool aborted;
 	s8 npages;		/* In the PRP list. 0 means small pool in use */
 	s8 nents;		/* Used in scatterlist */
-	dma_addr_t first_dma;
 	unsigned int dma_len;	/* length of single DMA segment mapping */
+	dma_addr_t first_dma;
 	dma_addr_t meta_dma;
 	struct scatterlist *sg;
 };
-- 
2.30.2




* Re: [PATCH 2/5] nvme-pci: iod's 'aborted' is a bool
  2022-07-28 22:11 ` [PATCH 2/5] nvme-pci: iod's 'aborted' is a bool Keith Busch
@ 2022-07-28 23:13   ` Chaitanya Kulkarni
  0 siblings, 0 replies; 13+ messages in thread
From: Chaitanya Kulkarni @ 2022-07-28 23:13 UTC (permalink / raw)
  To: Keith Busch; +Cc: hch, sagi, linux-nvme, Keith Busch

On 7/28/22 15:11, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> It's only true or false, so make this a bool to reflect that and save
> some space in nvme_iod.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck




* Re: [PATCH 3/5] nvme-pci: iod npages fits in s8
  2022-07-28 22:11 ` [PATCH 3/5] nvme-pci: iod npages fits in s8 Keith Busch
@ 2022-07-28 23:14   ` Chaitanya Kulkarni
  2022-07-29 13:19   ` Christoph Hellwig
  1 sibling, 0 replies; 13+ messages in thread
From: Chaitanya Kulkarni @ 2022-07-28 23:14 UTC (permalink / raw)
  To: Keith Busch; +Cc: hch, sagi, linux-nvme, Keith Busch

On 7/28/22 15:11, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> The largest allowed transfer is 4MB, which can use at most 1025 PRPs.
> Each PRP is 8 bytes, so the maximum number of 4k nvme pages needed for
> the iod_list is 3, which fits in an 's8'.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---


Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck




* Re: [PATCH 4/5] nvme-pci: iod nents fits in s8
  2022-07-28 22:11 ` [PATCH 4/5] nvme-pci: iod nents " Keith Busch
@ 2022-07-28 23:15   ` Chaitanya Kulkarni
  2022-07-29 13:21   ` Christoph Hellwig
  1 sibling, 0 replies; 13+ messages in thread
From: Chaitanya Kulkarni @ 2022-07-28 23:15 UTC (permalink / raw)
  To: Keith Busch; +Cc: hch, sagi, linux-nvme, Keith Busch

On 7/28/22 15:11, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> The maximum number of 'nents' allowed by the queue limit is 127, which
> fits in an 's8'.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>


Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck




* Re: [PATCH 5/5] nvme-pci: move iod dma_len to better pack
  2022-07-28 22:11 ` [PATCH 5/5] nvme-pci: move iod dma_len to better pack Keith Busch
@ 2022-07-28 23:15   ` Chaitanya Kulkarni
  0 siblings, 0 replies; 13+ messages in thread
From: Chaitanya Kulkarni @ 2022-07-28 23:15 UTC (permalink / raw)
  To: Keith Busch; +Cc: hch, sagi, linux-nvme, Keith Busch

On 7/28/22 15:11, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> The 32-bit 'dma_len' field packs in the iod struct above the dma_addr_t
> without creating a gap.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck




* Re: [PATCH 3/5] nvme-pci: iod npages fits in s8
  2022-07-28 22:11 ` [PATCH 3/5] nvme-pci: iod npages fits in s8 Keith Busch
  2022-07-28 23:14   ` Chaitanya Kulkarni
@ 2022-07-29 13:19   ` Christoph Hellwig
  1 sibling, 0 replies; 13+ messages in thread
From: Christoph Hellwig @ 2022-07-29 13:19 UTC (permalink / raw)
  To: Keith Busch; +Cc: linux-nvme, hch, sagi, Keith Busch

On Thu, Jul 28, 2022 at 03:11:49PM -0700, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> The largest allowed transfer is 4MB, which can use at most 1025 PRPs.
> Each PRP is 8 bytes, so the maximum number of 4k nvme pages needed for
> the iod_list is 3, which fits in an 's8'.

Can we use this chance to rename the field to nr_allocations or
similar?  The 'npages' name has always confused me, and the comment
does not exactly help.




* Re: [PATCH 4/5] nvme-pci: iod nents fits in s8
  2022-07-28 22:11 ` [PATCH 4/5] nvme-pci: iod nents " Keith Busch
  2022-07-28 23:15   ` Chaitanya Kulkarni
@ 2022-07-29 13:21   ` Christoph Hellwig
  2022-07-29 15:24     ` Keith Busch
  1 sibling, 1 reply; 13+ messages in thread
From: Christoph Hellwig @ 2022-07-29 13:21 UTC (permalink / raw)
  To: Keith Busch; +Cc: linux-nvme, hch, sagi, Keith Busch

On Thu, Jul 28, 2022 at 03:11:50PM -0700, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> The maximum number of 'nents' allowed by the queue limit is 127, which
> fits in an 's8'.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---
>  drivers/nvme/host/pci.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index 9e5bbf4e3e07..546de3c2000b 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -229,7 +229,7 @@ struct nvme_iod {
>  	bool use_sgl;
>  	bool aborted;
>  	s8 npages;		/* In the PRP list. 0 means small pool in use */
> -	int nents;		/* Used in scatterlist */
> +	s8 nents;		/* Used in scatterlist */

Can we please have a BUILD_BUG_ON to protect us from an increased
max_segments?  As I can totally see us incrementing that at some point,
and the limit is somewhat close.



* Re: [PATCH 4/5] nvme-pci: iod nents fits in s8
  2022-07-29 13:21   ` Christoph Hellwig
@ 2022-07-29 15:24     ` Keith Busch
  0 siblings, 0 replies; 13+ messages in thread
From: Keith Busch @ 2022-07-29 15:24 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Keith Busch, linux-nvme, sagi

On Fri, Jul 29, 2022 at 03:21:21PM +0200, Christoph Hellwig wrote:
> On Thu, Jul 28, 2022 at 03:11:50PM -0700, Keith Busch wrote:
> > From: Keith Busch <kbusch@kernel.org>
> > 
> > The maximum number of 'nents' allowed by the queue limit is 127, which
> > fits in an 's8'.
> > 
> > Signed-off-by: Keith Busch <kbusch@kernel.org>
> > ---
> >  drivers/nvme/host/pci.c | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> > 
> > diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> > index 9e5bbf4e3e07..546de3c2000b 100644
> > --- a/drivers/nvme/host/pci.c
> > +++ b/drivers/nvme/host/pci.c
> > @@ -229,7 +229,7 @@ struct nvme_iod {
> >  	bool use_sgl;
> >  	bool aborted;
> >  	s8 npages;		/* In the PRP list. 0 means small pool in use */
> > -	int nents;		/* Used in scatterlist */
> > +	s8 nents;		/* Used in scatterlist */
> 
> Can we please have a BUILD_BUG_ON to protect us from an increased
> max_segments?  As I can totally see us incrementing that at some point,
> and the limit is somewhat close.

Yes, will do. Also, this one is only ever assigned unsigned values, so I'll
make this a u8 in the next version.
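
For illustration, the guard could look something like the sketch below
(not the actual follow-up patch; it assumes the segment cap is still
the NVME_MAX_SEGS constant in pci.c and uses the kernel's S8_MAX):

	/*
	 * iod->nents is stored in a single byte; fail the build if the
	 * queue segment limit could ever overflow it.
	 */
	BUILD_BUG_ON(NVME_MAX_SEGS > S8_MAX);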



