linux-nvme.lists.infradead.org archive mirror
* [PATCH] nvmet: allow block device to use buffered I/O
@ 2019-10-30  4:26 Chaitanya Kulkarni
  2019-10-30  6:36 ` Keith Busch
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Chaitanya Kulkarni @ 2019-10-30  4:26 UTC (permalink / raw)
  To: linux-nvme; +Cc: kbusch, hch, Chaitanya Kulkarni, sagi

The NVMeOF target already has support for executing requests in
buffered I/O mode with the file backend. This patch allows block
devices to be used with the file backend code so that the buffered I/O
parameter can be set for a block-device-backed namespace when the newly
introduced configfs parameter use_vfs is set to 1.
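
For context, this reuses the existing fallback in nvmet_ns_enable()
(drivers/nvme/target/core.c): the bdev backend is tried first and the
file backend is used when it returns -ENOTBLK. A simplified sketch of
that existing flow (paraphrased, not part of this diff):

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);

With use_vfs=1, nvmet_bdev_ns_enable() still opens the block device and
records ns->size and ns->blksize_shift, but then returns -ENOTBLK so
that I/O goes through the file backend and honors the buffered_io
setting.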

Following is the performance improvement:-

Device_path = /dev/nullb0, I/O type = randread.

With this patch and buffered I/O = 0, use_vfs=0 :
  read: IOPS=198k, BW=772MiB/s (809MB/s)(45.2GiB/60002msec)
  read: IOPS=198k, BW=774MiB/s (811MB/s)(45.3GiB/60002msec)
  read: IOPS=197k, BW=771MiB/s (808MB/s)(45.2GiB/60002msec)

With this patch and buffered I/O = 1, use_vfs=1:
  read: IOPS=979k, BW=3825MiB/s (4010MB/s)(224GiB/60002msec)
  read: IOPS=983k, BW=3841MiB/s (4028MB/s)(225GiB/60003msec)
  read: IOPS=980k, BW=3828MiB/s (4014MB/s)(224GiB/60002msec)

cachestat shows the following output for the above case:-
<snip>
    HITS   MISSES  DIRTIES    RATIO   BUFFERS_MB   CACHE_MB
  795057    78070        0    91.1%         1026       7961
 1028017        4        0   100.0%         1026       7962
 1025259        0        0   100.0%         1026       7962
 1003651        0        0   100.0%         1026       7962
 1024775        0        0   100.0%         1026       7962
 1003080        0        0   100.0%         1026       7962
 1004128        0        0   100.0%         1026       7962
 1003831        0        0   100.0%         1026       7962
 1026133        0        0   100.0%         1026       7962
<snip>

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
Hi,

Please note that a patch based on a similar concept was posted earlier
to resolve a different issue. Even though this patch is based on a
similar concept, it has a completely different purpose and code
changes.

Regards,
Chaitanya
---
 drivers/nvme/target/configfs.c    | 29 +++++++++++++++++++++++++++++
 drivers/nvme/target/core.c        |  1 +
 drivers/nvme/target/io-cmd-bdev.c |  2 +-
 drivers/nvme/target/io-cmd-file.c | 33 ++++++++++++++++++++-------------
 drivers/nvme/target/nvmet.h       |  1 +
 5 files changed, 52 insertions(+), 14 deletions(-)

diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 98613a4..1ca727b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -545,6 +545,34 @@ static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
 
+static ssize_t nvmet_ns_use_vfs_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%d\n", to_nvmet_ns(item)->use_vfs);
+}
+
+static ssize_t nvmet_ns_use_vfs_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	bool val;
+
+	if (strtobool(page, &val))
+		return -EINVAL;
+
+	mutex_lock(&ns->subsys->lock);
+	if (ns->enabled) {
+		pr_err("disable ns before setting use_vfs value.\n");
+		mutex_unlock(&ns->subsys->lock);
+		return -EINVAL;
+	}
+
+	ns->use_vfs = val;
+	mutex_unlock(&ns->subsys->lock);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, use_vfs);
+
 static struct configfs_attribute *nvmet_ns_attrs[] = {
 	&nvmet_ns_attr_device_path,
 	&nvmet_ns_attr_device_nguid,
@@ -552,6 +580,7 @@ static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
 	&nvmet_ns_attr_ana_grpid,
 	&nvmet_ns_attr_enable,
 	&nvmet_ns_attr_buffered_io,
+	&nvmet_ns_attr_use_vfs,
 #ifdef CONFIG_PCI_P2PDMA
 	&nvmet_ns_attr_p2pmem,
 #endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 28438b8..083e8846 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -653,6 +653,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 
 	uuid_gen(&ns->uuid);
 	ns->buffered_io = false;
+	ns->use_vfs = false;
 
 	return ns;
 }
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index b6fca0e..ec807e1 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -64,7 +64,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
 	}
 	ns->size = i_size_read(ns->bdev->bd_inode);
 	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
-	return 0;
+	return ns->use_vfs ? -ENOTBLK : 0;
 }
 
 void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index caebfce..5f22784 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -25,6 +25,10 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
 		fput(ns->file);
 		ns->file = NULL;
 	}
+
+	/* when using the vfs layer we have opened the bdev, see target/core.c */
+	if (ns->use_vfs)
+		nvmet_bdev_ns_disable(ns);
 }
 
 int nvmet_file_ns_enable(struct nvmet_ns *ns)
@@ -43,19 +47,22 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 		return PTR_ERR(ns->file);
 	}
 
-	ret = vfs_getattr(&ns->file->f_path,
-			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
-	if (ret)
-		goto err;
-
-	ns->size = stat.size;
-	/*
-	 * i_blkbits can be greater than the universally accepted upper bound,
-	 * so make sure we export a sane namespace lba_shift.
-	 */
-	ns->blksize_shift = min_t(u8,
-			file_inode(ns->file)->i_blkbits, 12);
-
+	if (!ns->use_vfs) {
+		ret = vfs_getattr(&ns->file->f_path,
+				  &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+		if (ret) {
+			pr_err("failed to stat device file %s\n",
+					ns->device_path);
+			goto err;
+		}
+		/*
+		 * i_blkbits can be greater than the universally accepted upper
+		 * bound, so make sure we export a sane namespace lba_shift.
+		 */
+		ns->size = stat.size;
+		ns->blksize_shift = min_t(u8,
+				file_inode(ns->file)->i_blkbits, 12);
+	}
 	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
 			0, SLAB_HWCACHE_ALIGN, NULL);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 46df45e..0f68b82 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -63,6 +63,7 @@ struct nvmet_ns {
 	u32			anagrpid;
 
 	bool			buffered_io;
+	bool			use_vfs;
 	bool			enabled;
 	struct nvmet_subsys	*subsys;
 	const char		*device_path;
-- 
1.8.3.1



* Re: [PATCH] nvmet: allow block device to use buffered I/O
  2019-10-30  4:26 [PATCH] nvmet: allow block device to use buffered I/O Chaitanya Kulkarni
@ 2019-10-30  6:36 ` Keith Busch
  2019-10-31 14:42   ` Christoph Hellwig
  2019-10-31 14:45 ` Christoph Hellwig
  2019-10-31 16:30 ` Sagi Grimberg
  2 siblings, 1 reply; 6+ messages in thread
From: Keith Busch @ 2019-10-30  6:36 UTC (permalink / raw)
  To: Chaitanya Kulkarni; +Cc: hch, linux-nvme, sagi

On Tue, Oct 29, 2019 at 09:26:23PM -0700, Chaitanya Kulkarni wrote:
> The NVMeOF target already has support for executing requests in
> buffered I/O mode with the file backend. This patch allows block
> devices to be used with the file backend code so that the buffered I/O
> parameter can be set for a block-device-backed namespace when the newly
> introduced configfs parameter use_vfs is set to 1.

Is the fs overhead so bad that we can't just remove the bio based nvme
target? It seems low enough that having a single path to optimize is
worth considering.


* Re: [PATCH] nvmet: allow block device to use buffered I/O
  2019-10-30  6:36 ` Keith Busch
@ 2019-10-31 14:42   ` Christoph Hellwig
  0 siblings, 0 replies; 6+ messages in thread
From: Christoph Hellwig @ 2019-10-31 14:42 UTC (permalink / raw)
  To: Keith Busch; +Cc: sagi, linux-nvme, Chaitanya Kulkarni, hch

On Wed, Oct 30, 2019 at 03:36:09PM +0900, Keith Busch wrote:
> On Tue, Oct 29, 2019 at 09:26:23PM -0700, Chaitanya Kulkarni wrote:
> > The NVMeOF target already has support for executing requests in
> > buffered I/O mode with the file backend. This patch allows block
> > devices to be used with the file backend code so that the buffered I/O
> > parameter can be set for a block-device-backed namespace when the newly
> > introduced configfs parameter use_vfs is set to 1.
> 
> Is the fs overhead so bad that we can't just remove the bio based nvme
> target? It seems low enough that having a single path to optimize is
> worth considering.

Once we go down the benchmarketing path for latency and IOPS it
matters, as the bio-based path is significantly less code and touches
fewer cachelines.  The actual I/O pattern is the same, so for more
usual setups that do bigger I/O or have slower devices the two will be
mostly the same.


* Re: [PATCH] nvmet: allow block device to use buffered I/O
  2019-10-30  4:26 [PATCH] nvmet: allow block device to use buffered I/O Chaitanya Kulkarni
  2019-10-30  6:36 ` Keith Busch
@ 2019-10-31 14:45 ` Christoph Hellwig
  2019-11-01  6:35   ` Chaitanya Kulkarni
  2019-10-31 16:30 ` Sagi Grimberg
  2 siblings, 1 reply; 6+ messages in thread
From: Christoph Hellwig @ 2019-10-31 14:45 UTC (permalink / raw)
  To: Chaitanya Kulkarni; +Cc: kbusch, hch, linux-nvme, sagi

IFF we want to allow selecting the backend, it should be a configfs
file with the backend name; use_vfs is not very descriptive.  But I'm
still not sure it is all that useful a feature.


* Re: [PATCH] nvmet: allow block device to use buffered I/O
  2019-10-30  4:26 [PATCH] nvmet: allow block device to use buffered I/O Chaitanya Kulkarni
  2019-10-30  6:36 ` Keith Busch
  2019-10-31 14:45 ` Christoph Hellwig
@ 2019-10-31 16:30 ` Sagi Grimberg
  2 siblings, 0 replies; 6+ messages in thread
From: Sagi Grimberg @ 2019-10-31 16:30 UTC (permalink / raw)
  To: Chaitanya Kulkarni, linux-nvme; +Cc: kbusch, hch


> The NVMeOF target already has support for executing requests in
> buffered I/O mode with the file backend. This patch allows block
> devices to be used with the file backend code so that the buffered I/O
> parameter can be set for a block-device-backed namespace when the newly
> introduced configfs parameter use_vfs is set to 1.
> 
> Following is the performance improvement :-
> 
> Device_path = /dev/nullb0, I/O type = randread.
> 
> With this patch and buffered I/O = 0, use_vfs=0 :
>    read: IOPS=198k, BW=772MiB/s (809MB/s)(45.2GiB/60002msec)
>    read: IOPS=198k, BW=774MiB/s (811MB/s)(45.3GiB/60002msec)
>    read: IOPS=197k, BW=771MiB/s (808MB/s)(45.2GiB/60002msec)
> 
> With this patch and buffered I/O = 1, use_vfs=1:
>    read: IOPS=979k, BW=3825MiB/s (4010MB/s)(224GiB/60002msec)
>    read: IOPS=983k, BW=3841MiB/s (4028MB/s)(225GiB/60003msec)
>    read: IOPS=980k, BW=3828MiB/s (4014MB/s)(224GiB/60002msec)

How come there is such a massive change for a null block device
that is effectively a short-circuit? What is the bottleneck with
null_blk that this patch overcomes with the page cache?


* Re: [PATCH] nvmet: allow block device to use buffered I/O
  2019-10-31 14:45 ` Christoph Hellwig
@ 2019-11-01  6:35   ` Chaitanya Kulkarni
  0 siblings, 0 replies; 6+ messages in thread
From: Chaitanya Kulkarni @ 2019-11-01  6:35 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: kbusch, sagi, linux-nvme

On 10/31/2019 07:45 AM, Christoph Hellwig wrote:
> IFF we want to allow selecting the backend, it should be a configfs
> file with the backend name; use_vfs is not very descriptive.  But I'm
> still not sure it is all that useful a feature.
>

Right now we don't have any caching support on the target side for
the block device backend. With this patch we can at least have minimal
caching support. Although we are depending on the page cache for
performance, it is still better than having nothing.

For applications whose datasets fit in the target's DRAM and whose
access pattern is write-once, read-many, this feature is very useful.

With the above suggestion, how about something like this:-

 From 2e640a1a34875fa8920794afc55181b636ce0d00 Mon Sep 17 00:00:00 2001
From: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Date: Tue, 29 Oct 2019 21:05:03 -0700
Subject: [PATCH] nvmet: allow block device to use buffered I/O
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The NVMeOF target already has support for executing requests in
buffered I/O mode with the file backend. This patch allows block
devices to be used with the file backend code so that the buffered I/O
parameter can be set for a block-device-backed namespace when the newly
introduced per-namespace optional configfs parameter "backend" is set
to the string value "file".

Please note that this parameter is only in effect when device_path is
configured as a block device.
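
For illustration, configuring this from user space is just a configfs
write. A minimal sketch in C, assuming the nvmet subsystem and
namespace directories have already been created (the subsystem NQN,
namespace id and device path below are made up for the example):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* write a string to a configfs attribute file */
	static int cfs_write(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror(path);
			return -1;
		}
		if (write(fd, val, strlen(val)) < 0) {
			perror(path);
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}

	int main(void)
	{
		/* hypothetical namespace path, adjust to the actual setup */
		const char *ns =
			"/sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1";
		char attr[256];

		snprintf(attr, sizeof(attr), "%s/device_path", ns);
		cfs_write(attr, "/dev/nullb0");

		/* "backend" is the attribute added by this patch */
		snprintf(attr, sizeof(attr), "%s/backend", ns);
		cfs_write(attr, "file");

		snprintf(attr, sizeof(attr), "%s/buffered_io", ns);
		cfs_write(attr, "1");

		snprintf(attr, sizeof(attr), "%s/enable", ns);
		return cfs_write(attr, "1") ? 1 : 0;
	}

device_path, buffered_io and enable are existing nvmet namespace
attributes; "backend", like buffered_io, has to be written while the
namespace is disabled.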

Following is the performance improvement:-

Device_path = /dev/nullb0, I/O type = randread.

With this patch and buffered I/O = 0, backend=block:
   read: IOPS=198k, BW=772MiB/s (809MB/s)(45.2GiB/60002msec)
   read: IOPS=198k, BW=774MiB/s (811MB/s)(45.3GiB/60002msec)
   read: IOPS=197k, BW=771MiB/s (808MB/s)(45.2GiB/60002msec)

With this patch and buffered I/O = 1, backend=file:
   read: IOPS=979k, BW=3825MiB/s (4010MB/s)(224GiB/60002msec)
   read: IOPS=983k, BW=3841MiB/s (4028MB/s)(225GiB/60003msec)
   read: IOPS=980k, BW=3828MiB/s (4014MB/s)(224GiB/60002msec)

<snip>
     HITS   MISSES  DIRTIES    RATIO   BUFFERS_MB   CACHE_MB
   795057    78070        0    91.1%         1026       7961
  1028017        4        0   100.0%         1026       7962
  1025259        0        0   100.0%         1026       7962
  1003651        0        0   100.0%         1026       7962
  1024775        0        0   100.0%         1026       7962
  1003080        0        0   100.0%         1026       7962
  1004128        0        0   100.0%         1026       7962
  1003831        0        0   100.0%         1026       7962
  1026133        0        0   100.0%         1026       7962
<snip>

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
  drivers/nvme/target/configfs.c    | 34 ++++++++++++++++++++++++++++++++++
  drivers/nvme/target/core.c        |  1 +
  drivers/nvme/target/io-cmd-bdev.c |  2 +-
  drivers/nvme/target/io-cmd-file.c | 38 +++++++++++++++++++++++++-------------
  drivers/nvme/target/nvmet.h       |  1 +
  5 files changed, 62 insertions(+), 14 deletions(-)

diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 98613a4..538a591 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -545,6 +545,39 @@ static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,

  CONFIGFS_ATTR(nvmet_ns_, buffered_io);

+static ssize_t nvmet_ns_backend_show(struct config_item *item, char *page)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	int ret;
+
+	/* ns->backend value is valid only when we have valid ns->bdev set */
+	if (ns->bdev)
+		ret = sprintf(page, "%s\n", ns->backend ? "file" : "block");
+	else
+		ret = sprintf(page, "ns is either not enabled or not bdev.\n");
+
+	return ret;
+}
+
+static ssize_t nvmet_ns_backend_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+
+	mutex_lock(&ns->subsys->lock);
+	if (ns->enabled) {
+		pr_err("disable ns before setting backend value.\n");
+		mutex_unlock(&ns->subsys->lock);
+		return -EINVAL;
+	}
+
+	ns->backend = strncasecmp(page, "file", 4) == 0 ? true : false;
+	mutex_unlock(&ns->subsys->lock);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, backend);
+
  static struct configfs_attribute *nvmet_ns_attrs[] = {
  	&nvmet_ns_attr_device_path,
  	&nvmet_ns_attr_device_nguid,
@@ -552,6 +585,7 @@ static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
  	&nvmet_ns_attr_ana_grpid,
  	&nvmet_ns_attr_enable,
  	&nvmet_ns_attr_buffered_io,
+	&nvmet_ns_attr_backend,
  #ifdef CONFIG_PCI_P2PDMA
  	&nvmet_ns_attr_p2pmem,
  #endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 28438b8..fb9676d 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -653,6 +653,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)

  	uuid_gen(&ns->uuid);
  	ns->buffered_io = false;
+	ns->backend = false;

  	return ns;
  }
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index b6fca0e..0179675 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -64,7 +64,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
  	}
  	ns->size = i_size_read(ns->bdev->bd_inode);
  	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
-	return 0;
+	return ns->backend ? -ENOTBLK : 0;
  }

  void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index caebfce..ec258a4 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -25,6 +25,10 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
  		fput(ns->file);
  		ns->file = NULL;
  	}
+
+	/* when using the vfs layer we have opened the bdev, see target/core.c */
+	if (ns->backend)
+		nvmet_bdev_ns_disable(ns);
  }

  int nvmet_file_ns_enable(struct nvmet_ns *ns)
@@ -33,6 +37,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
  	struct kstat stat;
  	int ret;

+	if (!ns->bdev && ns->backend) {
+		pr_info("backend value only applies to block device\n");
+		return -EINVAL;
+	}
+
  	if (!ns->buffered_io)
  		flags |= O_DIRECT;

@@ -43,19 +52,22 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
  		return PTR_ERR(ns->file);
  	}

-	ret = vfs_getattr(&ns->file->f_path,
-			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
-	if (ret)
-		goto err;
-
-	ns->size = stat.size;
-	/*
-	 * i_blkbits can be greater than the universally accepted upper bound,
-	 * so make sure we export a sane namespace lba_shift.
-	 */
-	ns->blksize_shift = min_t(u8,
-			file_inode(ns->file)->i_blkbits, 12);
-
+	if (!ns->backend) {
+		ret = vfs_getattr(&ns->file->f_path,
+				  &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+		if (ret) {
+			pr_err("failed to stat device file %s\n",
+					ns->device_path);
+			goto err;
+		}
+		/*
+		 * i_blkbits can be greater than the universally accepted upper
+		 * bound, so make sure we export a sane namespace lba_shift.
+		 */
+		ns->size = stat.size;
+		ns->blksize_shift = min_t(u8,
+				file_inode(ns->file)->i_blkbits, 12);
+	}
  	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
  			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
  			0, SLAB_HWCACHE_ALIGN, NULL);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 46df45e..b7f7b97 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -63,6 +63,7 @@ struct nvmet_ns {
  	u32			anagrpid;

  	bool			buffered_io;
+	bool			backend;
  	bool			enabled;
  	struct nvmet_subsys	*subsys;
  	const char		*device_path;
-- 
1.8.3.1
