From: Dan Williams <dan.j.williams@intel.com>
To: linux-nvdimm@lists.01.org
Cc: david@fromorbit.com, linux-kernel@vger.kernel.org, hch@lst.de
Subject: [PATCH 10/13] pmem: kill wmb_pmem()
Date: Sat, 04 Jun 2016 13:53:25 -0700
Message-ID: <146507360564.8347.14991384832117517627.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <146507355220.8347.12117020810872172684.stgit@dwillia2-desk3.amr.corp.intel.com>

All users of wmb_pmem() have been replaced with flushing in the pmem driver,
so remove the api.
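
A caller-visible sketch of the conversion (illustrative only; 'dst', 'src',
'len' and 'bdev' are placeholder names, and the three-argument
blkdev_issue_flush() signature is the one current as of this series):

	/* before: order writes against the pmem api */
	memcpy_to_pmem(dst, src, len);
	wmb_pmem();

	/* after: request durability through the block layer */
	memcpy_to_pmem(dst, src, len);
	blkdev_issue_flush(bdev, GFP_KERNEL, NULL);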

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 arch/x86/include/asm/pmem.h |   36 ++-------------------------------
 include/linux/pmem.h        |   47 ++++---------------------------------------
 2 files changed, 6 insertions(+), 77 deletions(-)

diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index fbc5e92e1ecc..a8cf2a6b14d9 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -26,8 +26,7 @@
  * @n: length of the copy in bytes
  *
  * Copy data to persistent memory media via non-temporal stores so that
- * a subsequent arch_wmb_pmem() can flush cpu and memory controller
- * write buffers to guarantee durability.
+ * a subsequent pmem driver flush operation will drain posted write queues.
  */
 static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 		size_t n)
@@ -57,32 +56,12 @@ static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
 }
 
 /**
- * arch_wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of arch_memcpy_to_pmem() operations this drains data
- * from cpu write buffers and any platform (memory controller) buffers
- * to ensure that written data is durable on persistent memory media.
- */
-static inline void arch_wmb_pmem(void)
-{
-	/*
-	 * wmb() to 'sfence' all previous writes such that they are
-	 * architecturally visible to 'pcommit'.  Note, that we've
-	 * already arranged for pmem writes to avoid the cache via
-	 * arch_memcpy_to_pmem().
-	 */
-	wmb();
-	pcommit_sfence();
-}
-
-/**
  * arch_wb_cache_pmem - write back a cache range with CLWB
  * @vaddr:	virtual start address
  * @size:	number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.  This function requires explicit ordering with an
- * arch_wmb_pmem() call.
+ * instruction.
  */
 static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
 {
@@ -113,7 +92,6 @@ static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
  * @i:		iterator with source data
  *
  * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
  */
 static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 		struct iov_iter *i)
@@ -136,7 +114,6 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
  * @size:	number of bytes to zero
  *
  * Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with an arch_wmb_pmem() call.
  */
 static inline void arch_clear_pmem(void __pmem *addr, size_t size)
 {
@@ -150,14 +127,5 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
 {
 	clflush_cache_range((void __force *) addr, size);
 }
-
-static inline bool __arch_has_wmb_pmem(void)
-{
-	/*
-	 * We require that wmb() be an 'sfence', that is only guaranteed on
-	 * 64-bit builds
-	 */
-	return static_cpu_has(X86_FEATURE_PCOMMIT);
-}
 #endif /* CONFIG_ARCH_HAS_PMEM_API */
 #endif /* __ASM_X86_PMEM_H__ */
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 57d146fe44dd..9e3ea94b8157 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -26,16 +26,6 @@
  * calling these symbols with arch_has_pmem_api() and redirect to the
  * implementation in asm/pmem.h.
  */
-static inline bool __arch_has_wmb_pmem(void)
-{
-	return false;
-}
-
-static inline void arch_wmb_pmem(void)
-{
-	BUG();
-}
-
 static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 		size_t n)
 {
@@ -101,20 +91,6 @@ static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
 		return default_memcpy_from_pmem(dst, src, size);
 }
 
-/**
- * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
- *
- * For a given cpu implementation within an architecture it is possible
- * that wmb_pmem() resolves to a nop.  In the case this returns
- * false, pmem api users are unable to ensure durability and may want to
- * fall back to a different data consistency model, or otherwise notify
- * the user.
- */
-static inline bool arch_has_wmb_pmem(void)
-{
-	return arch_has_pmem_api() && __arch_has_wmb_pmem();
-}
-
 /*
  * These defaults seek to offer decent performance and minimize the
  * window between i/o completion and writes being durable on media.
@@ -152,7 +128,7 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
  * being effectively evicted from, or never written to, the processor
  * cache hierarchy after the copy completes.  After memcpy_to_pmem()
  * data may still reside in cpu or platform buffers, so this operation
- * must be followed by a wmb_pmem().
+ * must be followed by a blkdev_issue_flush() on the pmem block device.
  */
 static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
 {
@@ -163,28 +139,13 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
 }
 
 /**
- * wmb_pmem - synchronize writes to persistent memory
- *
- * After a series of memcpy_to_pmem() operations this drains data from
- * cpu write buffers and any platform (memory controller) buffers to
- * ensure that written data is durable on persistent memory media.
- */
-static inline void wmb_pmem(void)
-{
-	if (arch_has_wmb_pmem())
-		arch_wmb_pmem();
-	else
-		wmb();
-}
-
-/**
  * copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr:	PMEM destination address
  * @bytes:	number of bytes to copy
  * @i:		iterator with source data
  *
  * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
  */
 static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 		struct iov_iter *i)
@@ -200,7 +161,7 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
  * @size:	number of bytes to zero
  *
  * Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
  */
 static inline void clear_pmem(void __pmem *addr, size_t size)
 {
@@ -230,7 +191,7 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size)
  * @size:	number of bytes to write back
  *
  * Write back the processor cache range starting at 'addr' for 'size' bytes.
- * This function requires explicit ordering with a wmb_pmem() call.
+ * See blkdev_issue_flush() note for memcpy_to_pmem().
  */
 static inline void wb_cache_pmem(void __pmem *addr, size_t size)
 {
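
The kernel-doc above points users at blkdev_issue_flush(); the following is
a minimal, self-contained sketch of that pattern (an illustration only:
pmem_write_durable() is a hypothetical helper, 'bdev' is assumed to be the
pmem block device backing 'dst', and the three-argument blkdev_issue_flush()
signature is the one current as of this series):

	#include <linux/blkdev.h>
	#include <linux/pmem.h>

	/*
	 * Copy a buffer into a pmem mapping and make it durable.
	 * memcpy_to_pmem() keeps the data out of (or writes it back from)
	 * the cpu cache; the block-layer flush asks the pmem driver to
	 * drain any posted-write buffers in front of the media.
	 */
	static int pmem_write_durable(struct block_device *bdev,
			void __pmem *dst, const void *src, size_t len)
	{
		memcpy_to_pmem(dst, src, len);
		return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	}

Compared to the old wmb_pmem() scheme, the ordering point moves from the cpu
(sfence plus pcommit) to the flush handling in the pmem driver, which is what
allows pcommit to be deprecated across this series.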
