From: Dan Williams <dan.j.williams@intel.com>
To: linux-nvdimm@lists.01.org
Cc: Jan Kara <jack@suse.cz>, Matthew Wilcox <mawilcox@microsoft.com>,
	x86@kernel.org, linux-kernel@vger.kernel.org,
	Ingo Molnar <mingo@redhat.com>, Al Viro <viro@zeniv.linux.org.uk>,
	"H. Peter Anvin" <hpa@zytor.com>,
	linux-fsdevel@vger.kernel.org,
	Thomas Gleixner <tglx@linutronix.de>,
	Christoph Hellwig <hch@lst.de>
Subject: [PATCH 03/13] x86, dax, pmem: introduce 'copy_from_iter' dax operation
Date: Thu, 19 Jan 2017 19:50:29 -0800
Message-ID: <148488422955.37913.7723740119156814265.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <148488421301.37913.12835362165895864897.stgit@dwillia2-desk3.amr.corp.intel.com>

The direct-I/O write path for a pmem device must ensure that data is flushed
to a power-fail safe zone when the operation completes. However, other
dax-capable block devices, such as brd, have no such requirement.
Introduce a 'copy_from_iter' dax operation so that pmem can inject
cache management without imposing that overhead on other dax-capable
block_device drivers.
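
As a sketch only (not part of this patch): a dax-capable driver with no
power-fail flush requirement simply leaves the new operation unset, and
dax_iomap_actor() falls back to copy_from_iter_nocache() for writes. The
'brd_direct_access' name below is assumed purely for illustration:

	/*
	 * Hypothetical example: brd has no persistence domain to manage,
	 * so it omits .copy_from_iter and the dax core uses the plain
	 * non-temporal copy; only pmem pays for wb_cache_pmem().
	 */
	static const struct dax_operations brd_dax_ops = {
		.direct_access = brd_direct_access,
		/* .copy_from_iter left NULL => copy_from_iter_nocache() */
	};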

Cc: <x86@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 arch/x86/include/asm/pmem.h |   31 -------------------------------
 drivers/nvdimm/pmem.c       |   10 ++++++++++
 fs/dax.c                    |   11 ++++++++++-
 include/linux/blkdev.h      |    1 +
 include/linux/pmem.h        |   24 ------------------------
 5 files changed, 21 insertions(+), 56 deletions(-)

diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index f26ba430d853..0ca5e693f4a2 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -64,37 +64,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
 		clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-	return iter_is_iovec(i) == false;
-}
-
-/**
- * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
- * @addr:	PMEM destination address
- * @bytes:	number of bytes to copy
- * @i:		iterator with source data
- *
- * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- */
-static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
-		struct iov_iter *i)
-{
-	size_t len;
-
-	/* TODO: skip the write-back by always using non-temporal stores */
-	len = copy_from_iter_nocache(addr, bytes, i);
-
-	if (__iter_needs_pmem_wb(i))
-		arch_wb_cache_pmem(addr, bytes);
-
-	return len;
-}
-
 /**
  * arch_clear_pmem - zero a PMEM memory range
  * @addr:	virtual start address
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 6e5442174245..71e5e365d3fc 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -217,8 +217,18 @@ __weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
 	return pmem->size - pmem->pfn_pad - offset;
 }
 
+static size_t pmem_copy_from_iter(void *addr, size_t bytes,
+		struct iov_iter *i)
+{
+	size_t rc = copy_from_iter_nocache(addr, bytes, i);
+
+	wb_cache_pmem(addr, bytes);
+	return rc;
+}
+
 static const struct dax_operations pmem_dax_ops = {
 	.direct_access = pmem_direct_access,
+	.copy_from_iter = pmem_copy_from_iter,
 };
 
 static const struct block_device_operations pmem_fops = {
diff --git a/fs/dax.c b/fs/dax.c
index 81a77c070344..22cd57424a55 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1006,6 +1006,10 @@ static loff_t
 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct iomap *iomap)
 {
+	struct block_device *bdev = iomap->bdev;
+	size_t (*dax_copy_from_iter)(void *, size_t, struct iov_iter *);
+	const struct block_device_operations *ops = bdev->bd_disk->fops;
+	const struct dax_operations *dax_ops = ops->dax_ops;
 	struct iov_iter *iter = data;
 	loff_t end = pos + length, done = 0;
 	ssize_t ret = 0;
@@ -1033,6 +1037,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 					      (end - 1) >> PAGE_SHIFT);
 	}
 
+	if (dax_ops->copy_from_iter)
+		dax_copy_from_iter = dax_ops->copy_from_iter;
+	else
+		dax_copy_from_iter = copy_from_iter_nocache;
+
 	while (pos < end) {
 		unsigned offset = pos & (PAGE_SIZE - 1);
 		struct blk_dax_ctl dax = { 0 };
@@ -1052,7 +1061,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 			map_len = end - pos;
 
 		if (iov_iter_rw(iter) == WRITE)
-			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
+			map_len = dax_copy_from_iter(dax.addr, map_len, iter);
 		else
 			map_len = copy_to_iter(dax.addr, map_len, iter);
 		dax_unmap_atomic(iomap->bdev, &dax);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8afce34823f5..7ca559d124a3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1866,6 +1866,7 @@ struct blk_dax_ctl {
 struct dax_operations {
 	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
 			long);
+	size_t (*copy_from_iter)(void *, size_t, struct iov_iter *);
 };
 
 struct block_device_operations {
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 71ecf3d46aac..9d542a5600e4 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -31,13 +31,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 	BUG();
 }
 
-static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
-		struct iov_iter *i)
-{
-	BUG();
-	return 0;
-}
-
 static inline void arch_clear_pmem(void *addr, size_t size)
 {
 	BUG();
@@ -80,23 +73,6 @@ static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
 }
 
 /**
- * copy_from_iter_pmem - copy data from an iterator to PMEM
- * @addr:	PMEM destination address
- * @bytes:	number of bytes to copy
- * @i:		iterator with source data
- *
- * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * See blkdev_issue_flush() note for memcpy_to_pmem().
- */
-static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
-		struct iov_iter *i)
-{
-	if (arch_has_pmem_api())
-		return arch_copy_from_iter_pmem(addr, bytes, i);
-	return copy_from_iter_nocache(addr, bytes, i);
-}
-
-/**
  * clear_pmem - zero a PMEM memory range
  * @addr:	virtual start address
  * @size:	number of bytes to zero
