* [PATCH] nvmet: allow block device to use buffered I/O
@ 2019-10-30  4:26 Chaitanya Kulkarni
From: Chaitanya Kulkarni @ 2019-10-30  4:26 UTC (permalink / raw)
  To: linux-nvme; +Cc: kbusch, hch, Chaitanya Kulkarni, sagi

The NVMeOF target already supports executing requests in buffered I/O
mode with the file backend. This patch allows a block device to be
used with the file backend code, so that buffered I/O can be enabled
for a block device backed namespace by setting the newly introduced
configfs parameter use_vfs to 1. When use_vfs=1, nvmet_bdev_ns_enable()
returns -ENOTBLK after opening the bdev, and the core falls back to
nvmet_file_ns_enable(), which then serves I/O through the VFS layer.
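
For example, a namespace could be configured like this (a hedged
sketch: the configfs paths follow the usual nvmet layout, and the
subsystem name and backing device below are only placeholders):

  cd /sys/kernel/config/nvmet/subsystems/testnqn/namespaces/1
  echo -n /dev/nullb0 > device_path
  echo 1 > use_vfs        # route the bdev through the file backend
  echo 1 > buffered_io    # use the page cache instead of direct I/O
  echo 1 > enable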

The following shows the performance improvement:

Device_path = /dev/nullb0, I/O type = randread.

With this patch, buffered_io=0 and use_vfs=0:
  read: IOPS=198k, BW=772MiB/s (809MB/s)(45.2GiB/60002msec)
  read: IOPS=198k, BW=774MiB/s (811MB/s)(45.3GiB/60002msec)
  read: IOPS=197k, BW=771MiB/s (808MB/s)(45.2GiB/60002msec)

With this patch, buffered_io=1 and use_vfs=1:
  read: IOPS=979k, BW=3825MiB/s (4010MB/s)(224GiB/60002msec)
  read: IOPS=983k, BW=3841MiB/s (4028MB/s)(225GiB/60003msec)
  read: IOPS=980k, BW=3828MiB/s (4014MB/s)(224GiB/60002msec)
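
The read lines above are fio output; a sketch of a comparable
60-second randread job is below (the exact options used for these
runs are not in the patch, so the device node, queue depth, and job
count here are assumptions):

  fio --name=randread --rw=randread --bs=4k --ioengine=libaio \
      --iodepth=32 --numjobs=4 --runtime=60 --time_based \
      --filename=/dev/nvme1n1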

cachestat shows the following output for the above case:
<snip>
    HITS   MISSES  DIRTIES    RATIO   BUFFERS_MB   CACHE_MB
  795057    78070        0    91.1%         1026       7961
 1028017        4        0   100.0%         1026       7962
 1025259        0        0   100.0%         1026       7962
 1003651        0        0   100.0%         1026       7962
 1024775        0        0   100.0%         1026       7962
 1003080        0        0   100.0%         1026       7962
 1004128        0        0   100.0%         1026       7962
 1003831        0        0   100.0%         1026       7962
 1026133        0        0   100.0%         1026       7962
<snip>
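
For reference, output in this format can be produced by sampling the
page cache once per second with bcc's cachestat tool (the install
path below is an assumption; it varies by distribution):

  /usr/share/bcc/tools/cachestat 1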

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
Hi,

Please note that a patch based on a similar concept was posted
earlier to resolve a different issue. Although this patch builds on
the same concept, it has a completely different purpose and different
code changes.

Regards,
Chaitanya
---
 drivers/nvme/target/configfs.c    | 29 +++++++++++++++++++++++++++++
 drivers/nvme/target/core.c        |  1 +
 drivers/nvme/target/io-cmd-bdev.c |  2 +-
 drivers/nvme/target/io-cmd-file.c | 33 ++++++++++++++++++++-------------
 drivers/nvme/target/nvmet.h       |  1 +
 5 files changed, 52 insertions(+), 14 deletions(-)

diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 98613a4..1ca727b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -545,6 +545,34 @@ static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
 
+static ssize_t nvmet_ns_use_vfs_show(struct config_item *item, char *page)
+{
+	return sprintf(page, "%d\n", to_nvmet_ns(item)->use_vfs);
+}
+
+static ssize_t nvmet_ns_use_vfs_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	bool val;
+
+	if (strtobool(page, &val))
+		return -EINVAL;
+
+	mutex_lock(&ns->subsys->lock);
+	if (ns->enabled) {
+		pr_err("disable ns before setting use_vfs value.\n");
+		mutex_unlock(&ns->subsys->lock);
+		return -EINVAL;
+	}
+
+	ns->use_vfs = val;
+	mutex_unlock(&ns->subsys->lock);
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, use_vfs);
+
 static struct configfs_attribute *nvmet_ns_attrs[] = {
 	&nvmet_ns_attr_device_path,
 	&nvmet_ns_attr_device_nguid,
@@ -552,6 +580,7 @@ static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
 	&nvmet_ns_attr_ana_grpid,
 	&nvmet_ns_attr_enable,
 	&nvmet_ns_attr_buffered_io,
+	&nvmet_ns_attr_use_vfs,
 #ifdef CONFIG_PCI_P2PDMA
 	&nvmet_ns_attr_p2pmem,
 #endif
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 28438b8..083e8846 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -653,6 +653,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 
 	uuid_gen(&ns->uuid);
 	ns->buffered_io = false;
+	ns->use_vfs = false;
 
 	return ns;
 }
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index b6fca0e..ec807e1 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -64,7 +64,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
 	}
 	ns->size = i_size_read(ns->bdev->bd_inode);
 	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
-	return 0;
+	return ns->use_vfs ? -ENOTBLK : 0;
 }
 
 void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index caebfce..5f22784 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -25,6 +25,10 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns)
 		fput(ns->file);
 		ns->file = NULL;
 	}
+
+	/* when using the VFS layer we have opened the bdev; see core.c */
+	if (ns->use_vfs)
+		nvmet_bdev_ns_disable(ns);
 }
 
 int nvmet_file_ns_enable(struct nvmet_ns *ns)
@@ -43,19 +47,22 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
 		return PTR_ERR(ns->file);
 	}
 
-	ret = vfs_getattr(&ns->file->f_path,
-			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
-	if (ret)
-		goto err;
-
-	ns->size = stat.size;
-	/*
-	 * i_blkbits can be greater than the universally accepted upper bound,
-	 * so make sure we export a sane namespace lba_shift.
-	 */
-	ns->blksize_shift = min_t(u8,
-			file_inode(ns->file)->i_blkbits, 12);
-
+	if (!ns->use_vfs) {
+		ret = vfs_getattr(&ns->file->f_path,
+				  &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+		if (ret) {
+			pr_err("failed to stat device file %s\n",
+					ns->device_path);
+			goto err;
+		}
+		/*
+		 * i_blkbits can be greater than the universally accepted upper
+		 * bound, so make sure we export a sane namespace lba_shift.
+		 */
+		ns->size = stat.size;
+		ns->blksize_shift = min_t(u8,
+				file_inode(ns->file)->i_blkbits, 12);
+	}
 	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
 			0, SLAB_HWCACHE_ALIGN, NULL);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 46df45e..0f68b82 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -63,6 +63,7 @@ struct nvmet_ns {
 	u32			anagrpid;
 
 	bool			buffered_io;
+	bool			use_vfs;
 	bool			enabled;
 	struct nvmet_subsys	*subsys;
 	const char		*device_path;
-- 
1.8.3.1

