From: Davidlohr Bueso <dave@stgolabs.net>
To: linux-arch@vger.kernel.org
Cc: dan.j.williams@intel.com, peterz@infradead.org,
	mark.rutland@arm.com, dave.jiang@intel.com,
	Jonathan.Cameron@huawei.com, a.manzanares@samsung.com,
	bwidawsk@kernel.org, alison.schofield@intel.com,
	ira.weiny@intel.com, linux-cxl@vger.kernel.org,
	nvdimm@lists.linux.dev, x86@kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, dave@stgolabs.net
Subject: [PATCH v2] arch/cacheflush: Introduce flush_all_caches()
Date: Fri, 19 Aug 2022 10:10:24 -0700	[thread overview]
Message-ID: <20220819171024.1766857-1-dave@stgolabs.net> (raw)

With CXL security features, the nvdimm requirement to flush all CPU
caches globally is no longer specific to that subsystem, and reaches
even beyond the scope of security_ops: CXL will need the same semantics
for features not necessarily limited to persistent memory.

The functionality this enables is instantaneous secure erase of
potentially terabytes of memory at once, where the kernel must be
certain that none of the data from before the secure erase is still
present in the caches. It is also used when unlocking a memory device,
where speculative reads and firmware accesses could have cached poison
from before the device was unlocked.

This capability is typically used only once per boot (for unlock), or
once per bare-metal provisioning event (secure erase), such as when
handing off the system to another tenant or decommissioning a device.
That small scope, plus the fact that none of this is available to a VM,
limits the potential damage.

While the scope of this is the physical address space, add a new
flush_all_caches() helper to the cacheflush headers so that each
architecture can define it when capable. For x86, just use the wbinvd
hammer; every other architecture is left incapable for now.
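
For reference, a minimal sketch of the expected calling pattern
(mirroring the nfit conversion below; simplified and illustrative,
not a definitive API contract):

	if (!flush_all_caches_capable())
		return -EINVAL;
	/* ... issue the unlock/secure-erase command to the device ... */
	flush_all_caches();	/* drop any stale or poisoned cachelines */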

Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
---

Changes from v1 (https://lore.kernel.org/all/20220815160706.tqd42dv24tgb7x7y@offworld/):
- Added comments and improved the changelog to reflect that this
  routine should be avoided and is not considered a general API (Peter, Dan).
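
For illustration, a new architecture would opt in along these lines
(hypothetical sketch; "myarch" and its all-CPU writeback-invalidate
primitive are placeholders, mirroring the x86 hunk below):

	/* arch/myarch/include/asm/cacheflush.h */
	#define flush_all_caches() \
		do { myarch_wbinvd_all_cpus(); } while (0)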

 arch/x86/include/asm/cacheflush.h |  4 +++
 drivers/acpi/nfit/intel.c         | 41 ++++++++++++++-----------------
 include/asm-generic/cacheflush.h  | 31 +++++++++++++++++++++++
 3 files changed, 53 insertions(+), 23 deletions(-)

diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index b192d917a6d0..ac4d4fd4e508 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -10,4 +10,8 @@
 
 void clflush_cache_range(void *addr, unsigned int size);
 
+/* see comments in the stub version */
+#define flush_all_caches() \
+	do { wbinvd_on_all_cpus(); } while (0)
+
 #endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 8dd792a55730..f2f6c31e6ab7 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -4,6 +4,7 @@
 #include <linux/ndctl.h>
 #include <linux/acpi.h>
 #include <asm/smp.h>
+#include <linux/cacheflush.h>
 #include "intel.h"
 #include "nfit.h"
 
@@ -190,8 +191,6 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
 	}
 }
 
-static void nvdimm_invalidate_cache(void);
-
 static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
 		const struct nvdimm_key_data *key_data)
 {
@@ -210,6 +209,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
 	};
 	int rc;
 
+	if (!flush_all_caches_capable())
+		return -EINVAL;
+
 	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
 		return -ENOTTY;
 
@@ -228,7 +230,7 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
 	}
 
 	/* DIMM unlocked, invalidate all CPU caches before we read it */
-	nvdimm_invalidate_cache();
+	flush_all_caches();
 
 	return 0;
 }
@@ -294,11 +296,14 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
 		},
 	};
 
+	if (!flush_all_caches_capable())
+		return -EINVAL;
+
 	if (!test_bit(cmd, &nfit_mem->dsm_mask))
 		return -ENOTTY;
 
 	/* flush all cache before we erase DIMM */
-	nvdimm_invalidate_cache();
+	flush_all_caches();
 	memcpy(nd_cmd.cmd.passphrase, key->data,
 			sizeof(nd_cmd.cmd.passphrase));
 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -318,7 +323,7 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
 	}
 
 	/* DIMM erased, invalidate all CPU caches before we read it */
-	nvdimm_invalidate_cache();
+	flush_all_caches();
 	return 0;
 }
 
@@ -338,6 +343,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
 		},
 	};
 
+	if (!flush_all_caches_capable())
+		return -EINVAL;
+
 	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
 		return -ENOTTY;
 
@@ -355,7 +363,7 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
 	}
 
 	/* flush all cache before we make the nvdimms available */
-	nvdimm_invalidate_cache();
+	flush_all_caches();
 	return 0;
 }
 
@@ -377,11 +385,14 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
 		},
 	};
 
+	if (!flush_all_caches_capable())
+		return -EINVAL;
+
 	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
 		return -ENOTTY;
 
 	/* flush all cache before we erase DIMM */
-	nvdimm_invalidate_cache();
+	flush_all_caches();
 	memcpy(nd_cmd.cmd.passphrase, nkey->data,
 			sizeof(nd_cmd.cmd.passphrase));
 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
@@ -401,22 +412,6 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
 	}
 }
 
-/*
- * TODO: define a cross arch wbinvd equivalent when/if
- * NVDIMM_FAMILY_INTEL command support arrives on another arch.
- */
-#ifdef CONFIG_X86
-static void nvdimm_invalidate_cache(void)
-{
-	wbinvd_on_all_cpus();
-}
-#else
-static void nvdimm_invalidate_cache(void)
-{
-	WARN_ON_ONCE("cache invalidation required after unlock\n");
-}
-#endif
-
 static const struct nvdimm_security_ops __intel_security_ops = {
 	.get_flags = intel_security_flags,
 	.freeze = intel_security_freeze,
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 4f07afacbc23..89f310f92498 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -115,4 +115,35 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 	memcpy(dst, src, len)
 #endif
 
+/*
+ * Flush the entire cache hierarchy across all CPUs, however:
+ *
+ *       YOU DO NOT WANT TO USE THIS FUNCTION.
+ *
+ * It is considered a big hammer and can affect overall
+ * system performance and increase latency/response times.
+ * As such it is not for general usage, but for specific use
+ * cases involving instantaneously invalidating wide swaths
+ * of memory on bare metal.
+ *
+ * Unlike the APIs above, this function can be defined on
+ * architectures with VIPT or PIPT caches, as it is beyond
+ * the scope of virtual-to-physical mappings or page table
+ * changes.
+ *
+ * The limitation here is that architectures that make use
+ * of it must actually be able to comply with the semantics,
+ * such as ensuring that caches are in a consistent state.
+ * The caller can verify the situation early on.
+ */
+#ifndef flush_all_caches
+# define flush_all_caches_capable() false
+static inline void flush_all_caches(void)
+{
+	WARN_ONCE(1, "cache invalidation required\n");
+}
+#else
+# define flush_all_caches_capable() true
+#endif
+
 #endif /* _ASM_GENERIC_CACHEFLUSH_H */
-- 
2.37.2


Thread overview:
2022-08-19 17:10 Davidlohr Bueso [this message]
2022-08-20  0:20 ` Ira Weiny
2022-08-20 15:10   ` Davidlohr Bueso
2022-08-22  6:38 ` Christoph Hellwig
2022-08-22 13:37   ` Davidlohr Bueso
2022-08-22 18:16     ` Dan Williams
2022-08-23 15:37       ` Davidlohr Bueso
2022-08-23 17:36         ` Dan Williams
