kvm.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherency
@ 2020-09-10  2:22 Krish Sadhukhan
  2020-09-10  2:22 ` [PATCH 1/3 v2] KVM: SVM: Replace numeric value for SME CPUID leaf with a #define Krish Sadhukhan
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Krish Sadhukhan @ 2020-09-10  2:22 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, jmattson, thomas.lendacky

v1 -> v2:
	1. Patch# 2 is the new addition. It adds the hardware-enforced cache
	   coherency as a CPUID feature.
	2. Patch# 3 (which was patch# 2 in v1) also adds the check to
	   __set_memory_enc_dec() so that cache/TLB is flushed only if
	   hardware doesn't enforce cache coherency.


[PATCH 1/3 v2] KVM: SVM: Replace numeric value for SME CPUID leaf with a
[PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a
[PATCH 3/3 v2] KVM: SVM: Don't flush cache of encrypted pages if

 arch/x86/boot/compressed/mem_encrypt.S | 5 +++--
 arch/x86/include/asm/cpufeatures.h     | 6 ++++++
 arch/x86/kernel/cpu/amd.c              | 5 ++++-
 arch/x86/kernel/cpu/scattered.c        | 4 ++--
 arch/x86/kvm/cpuid.c                   | 2 +-
 arch/x86/kvm/svm/sev.c                 | 3 ++-
 arch/x86/kvm/svm/svm.c                 | 4 ++--
 arch/x86/mm/mem_encrypt_identity.c     | 4 ++--
 arch/x86/mm/pat/set_memory.c           | 6 ++++--
 9 files changed, 26 insertions(+), 13 deletions(-)

Krish Sadhukhan (3):
      KVM: SVM: Replace numeric value for SME CPUID leaf with a #define
      KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature
      KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherency


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH 1/3 v2] KVM: SVM: Replace numeric value for SME CPUID leaf with a #define
  2020-09-10  2:22 [PATCH 0/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherency Krish Sadhukhan
@ 2020-09-10  2:22 ` Krish Sadhukhan
  2020-09-10  2:22 ` [PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature Krish Sadhukhan
  2020-09-10  2:22 ` [PATCH 3/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherenc Krish Sadhukhan
  2 siblings, 0 replies; 9+ messages in thread
From: Krish Sadhukhan @ 2020-09-10  2:22 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, jmattson, thomas.lendacky

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/boot/compressed/mem_encrypt.S | 5 +++--
 arch/x86/include/asm/cpufeatures.h     | 5 +++++
 arch/x86/kernel/cpu/amd.c              | 2 +-
 arch/x86/kernel/cpu/scattered.c        | 4 ++--
 arch/x86/kvm/cpuid.c                   | 2 +-
 arch/x86/kvm/svm/svm.c                 | 4 ++--
 arch/x86/mm/mem_encrypt_identity.c     | 4 ++--
 7 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index dd07e7b41b11..22e30b0c0d19 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -12,6 +12,7 @@
 #include <asm/processor-flags.h>
 #include <asm/msr.h>
 #include <asm/asm-offsets.h>
+#include <asm/cpufeatures.h>
 
 	.text
 	.code32
@@ -31,7 +32,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
 
 	movl	$0x80000000, %eax	/* CPUID to check the highest leaf */
 	cpuid
-	cmpl	$0x8000001f, %eax	/* See if 0x8000001f is available */
+	cmpl	$CPUID_AMD_SME, %eax	/* See if 0x8000001f is available */
 	jb	.Lno_sev
 
 	/*
@@ -40,7 +41,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
 	 *   CPUID Fn8000_001F[EBX] - Bits 5:0
 	 *     Pagetable bit position used to indicate encryption
 	 */
-	movl	$0x8000001f, %eax
+	movl	$CPUID_AMD_SME, %eax
 	cpuid
 	bt	$1, %eax		/* Check if SEV is available */
 	jnc	.Lno_sev
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2901d5df4366..81335e6fe47d 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -10,6 +10,11 @@
 #include <asm/disabled-features.h>
 #endif
 
+/*
+ * AMD CPUID functions
+ */
+#define CPUID_AMD_SME  0x8000001f	/* Secure Memory Encryption */
+
 /*
  * Defines x86 CPU feature bits
  */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dcc3d943c68f..4507ededb978 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -630,7 +630,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 		 * will be a value above 32-bits this is still done for
 		 * CONFIG_X86_32 so that accurate values are reported.
 		 */
-		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+		c->x86_phys_bits -= (cpuid_ebx(CPUID_AMD_SME) >> 6) & 0x3f;
 
 		if (IS_ENABLED(CONFIG_X86_32))
 			goto clear_all;
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 62b137c3c97a..033c112e03fc 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -39,8 +39,8 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
 	{ X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
-	{ X86_FEATURE_SME,		CPUID_EAX,  0, 0x8000001f, 0 },
-	{ X86_FEATURE_SEV,		CPUID_EAX,  1, 0x8000001f, 0 },
+	{ X86_FEATURE_SME,		CPUID_EAX,  0, CPUID_AMD_SME, 0 },
+	{ X86_FEATURE_SEV,		CPUID_EAX,  1, CPUID_AMD_SME, 0 },
 	{ 0, 0, 0, 0, 0 }
 };
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3fd6eec202d7..95863e767d3d 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -756,7 +756,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		entry->edx = 0;
 		break;
 	case 0x80000000:
-		entry->eax = min(entry->eax, 0x8000001f);
+		entry->eax = min(entry->eax, CPUID_AMD_SME);
 		break;
 	case 0x80000001:
 		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0194336b64a4..a4e92ae399b4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -749,7 +749,7 @@ static __init void svm_adjust_mmio_mask(void)
 	u64 msr, mask;
 
 	/* If there is no memory encryption support, use existing mask */
-	if (cpuid_eax(0x80000000) < 0x8000001f)
+	if (cpuid_eax(0x80000000) < CPUID_AMD_SME)
 		return;
 
 	/* If memory encryption is not enabled, use existing mask */
@@ -757,7 +757,7 @@ static __init void svm_adjust_mmio_mask(void)
 	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
 		return;
 
-	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
+	enc_bit = cpuid_ebx(CPUID_AMD_SME) & 0x3f;
 	mask_bit = boot_cpu_data.x86_phys_bits;
 
 	/* Increment the mask bit if it is the same as the encryption bit */
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index e2b0e2ac07bb..cbe600dd357b 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -498,7 +498,7 @@ void __init sme_enable(struct boot_params *bp)
 	eax = 0x80000000;
 	ecx = 0;
 	native_cpuid(&eax, &ebx, &ecx, &edx);
-	if (eax < 0x8000001f)
+	if (eax < CPUID_AMD_SME)
 		return;
 
 #define AMD_SME_BIT	BIT(0)
@@ -520,7 +520,7 @@ void __init sme_enable(struct boot_params *bp)
 	 *   CPUID Fn8000_001F[EBX]
 	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
 	 */
-	eax = 0x8000001f;
+	eax = CPUID_AMD_SME;
 	ecx = 0;
 	native_cpuid(&eax, &ebx, &ecx, &edx);
 	if (!(eax & feature_mask))
-- 
2.18.4


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature
  2020-09-10  2:22 [PATCH 0/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherency Krish Sadhukhan
  2020-09-10  2:22 ` [PATCH 1/3 v2] KVM: SVM: Replace numeric value for SME CPUID leaf with a #define Krish Sadhukhan
@ 2020-09-10  2:22 ` Krish Sadhukhan
  2020-09-10 14:45   ` Tom Lendacky
  2020-09-10  2:22 ` [PATCH 3/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherenc Krish Sadhukhan
  2 siblings, 1 reply; 9+ messages in thread
From: Krish Sadhukhan @ 2020-09-10  2:22 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, jmattson, thomas.lendacky

Some AMD hardware platforms enforce cache coherency across encryption domains.
Add this hardware feature as a CPUID feature to the kernel.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/include/asm/cpufeatures.h | 1 +
 arch/x86/kernel/cpu/amd.c          | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 81335e6fe47d..0e5b27ee5931 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -293,6 +293,7 @@
 #define X86_FEATURE_FENCE_SWAPGS_USER	(11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
 #define X86_FEATURE_FENCE_SWAPGS_KERNEL	(11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
 #define X86_FEATURE_SPLIT_LOCK_DETECT	(11*32+ 6) /* #AC for split lock */
+#define X86_FEATURE_HW_CACHE_COHERENCY (11*32+ 7) /* AMD hardware-enforced cache coherency */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 4507ededb978..698884812989 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -632,6 +632,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
 		 */
 		c->x86_phys_bits -= (cpuid_ebx(CPUID_AMD_SME) >> 6) & 0x3f;
 
+		if (cpuid_eax(CPUID_AMD_SME) & 0x400)
+			set_cpu_cap(c, X86_FEATURE_HW_CACHE_COHERENCY);
+
 		if (IS_ENABLED(CONFIG_X86_32))
 			goto clear_all;
 
-- 
2.18.4


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 3/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherenc
  2020-09-10  2:22 [PATCH 0/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherency Krish Sadhukhan
  2020-09-10  2:22 ` [PATCH 1/3 v2] KVM: SVM: Replace numeric value for SME CPUID leaf with a #define Krish Sadhukhan
  2020-09-10  2:22 ` [PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature Krish Sadhukhan
@ 2020-09-10  2:22 ` Krish Sadhukhan
  2020-09-10 14:43   ` Tom Lendacky
  2 siblings, 1 reply; 9+ messages in thread
From: Krish Sadhukhan @ 2020-09-10  2:22 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, jmattson, thomas.lendacky

Some hardware implementations may enforce cache coherency across encryption
domains. In such cases, it's not required to flush encrypted pages off
cache lines.

Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
 arch/x86/kvm/svm/sev.c       | 3 ++-
 arch/x86/mm/pat/set_memory.c | 6 ++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 402dc4234e39..8aa2209f2637 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -384,7 +384,8 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
 	uint8_t *page_virtual;
 	unsigned long i;
 
-	if (npages == 0 || pages == NULL)
+	if (this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY) || npages == 0 ||
+	    pages == NULL)
 		return;
 
 	for (i = 0; i < npages; i++) {
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index d1b2a889f035..5e2c618cbe84 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1999,7 +1999,8 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	/*
 	 * Before changing the encryption attribute, we need to flush caches.
 	 */
-	cpa_flush(&cpa, 1);
+	if (!this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY))
+		cpa_flush(&cpa, 1);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2010,7 +2011,8 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	 * flushing gets optimized in the cpa_flush() path use the same logic
 	 * as above.
 	 */
-	cpa_flush(&cpa, 0);
+	if (!this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY))
+		cpa_flush(&cpa, 0);
 
 	return ret;
 }
-- 
2.18.4


^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 3/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherenc
  2020-09-10  2:22 ` [PATCH 3/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherenc Krish Sadhukhan
@ 2020-09-10 14:43   ` Tom Lendacky
  2020-09-10 19:29     ` Krish Sadhukhan
  0 siblings, 1 reply; 9+ messages in thread
From: Tom Lendacky @ 2020-09-10 14:43 UTC (permalink / raw)
  To: Krish Sadhukhan, kvm; +Cc: pbonzini, jmattson

On 9/9/20 9:22 PM, Krish Sadhukhan wrote:
> Some hardware implementations may enforce cache coherency across encryption
> domains. In such cases, it's not required to flush encrypted pages off
> cache lines.
> 
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> ---
>   arch/x86/kvm/svm/sev.c       | 3 ++-
>   arch/x86/mm/pat/set_memory.c | 6 ++++--
>   2 files changed, 6 insertions(+), 3 deletions(-)

You should probably split this patch into two patches, one for the KVM 
usage and one for the MM usage with appropriate subjects prefixes at that 
point. Also, you need to then copy the proper people. Did you run these 
patches through get_maintainer.pl?

> 
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 402dc4234e39..8aa2209f2637 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -384,7 +384,8 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
>   	uint8_t *page_virtual;
>   	unsigned long i;
>   
> -	if (npages == 0 || pages == NULL)
> +	if (this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY) || npages == 0 ||
> +	    pages == NULL)
>   		return;
>   
>   	for (i = 0; i < npages; i++) {
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index d1b2a889f035..5e2c618cbe84 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -1999,7 +1999,8 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>   	/*
>   	 * Before changing the encryption attribute, we need to flush caches.
>   	 */
> -	cpa_flush(&cpa, 1);
> +	if (!this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY))
> +		cpa_flush(&cpa, 1);

This bit is only about cache coherency, so the TLB flush is still needed, 
so this should be something like:

	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY));

>   
>   	ret = __change_page_attr_set_clr(&cpa, 1);
>   
> @@ -2010,7 +2011,8 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
>   	 * flushing gets optimized in the cpa_flush() path use the same logic
>   	 * as above.
>   	 */
> -	cpa_flush(&cpa, 0);
> +	if (!this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY))
> +		cpa_flush(&cpa, 0);

This should not be changed, still need the call to do the TLB flush.

Thanks,
Tom

>   
>   	return ret;
>   }
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature
  2020-09-10  2:22 ` [PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature Krish Sadhukhan
@ 2020-09-10 14:45   ` Tom Lendacky
  2020-09-10 19:27     ` Krish Sadhukhan
  0 siblings, 1 reply; 9+ messages in thread
From: Tom Lendacky @ 2020-09-10 14:45 UTC (permalink / raw)
  To: Krish Sadhukhan, kvm; +Cc: pbonzini, jmattson

On 9/9/20 9:22 PM, Krish Sadhukhan wrote:
> Some AMD hardware platforms enforce cache coherency across encryption domains.
> Add this hardware feature as a CPUID feature to the kernel.
> 
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> ---
>   arch/x86/include/asm/cpufeatures.h | 1 +
>   arch/x86/kernel/cpu/amd.c          | 3 +++
>   2 files changed, 4 insertions(+)
> 
> diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
> index 81335e6fe47d..0e5b27ee5931 100644
> --- a/arch/x86/include/asm/cpufeatures.h
> +++ b/arch/x86/include/asm/cpufeatures.h
> @@ -293,6 +293,7 @@
>   #define X86_FEATURE_FENCE_SWAPGS_USER	(11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
>   #define X86_FEATURE_FENCE_SWAPGS_KERNEL	(11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
>   #define X86_FEATURE_SPLIT_LOCK_DETECT	(11*32+ 6) /* #AC for split lock */
> +#define X86_FEATURE_HW_CACHE_COHERENCY (11*32+ 7) /* AMD hardware-enforced cache coherency */
>   
>   /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
>   #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */
> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
> index 4507ededb978..698884812989 100644
> --- a/arch/x86/kernel/cpu/amd.c
> +++ b/arch/x86/kernel/cpu/amd.c
> @@ -632,6 +632,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
>   		 */
>   		c->x86_phys_bits -= (cpuid_ebx(CPUID_AMD_SME) >> 6) & 0x3f;
>   
> +		if (cpuid_eax(CPUID_AMD_SME) & 0x400)
> +			set_cpu_cap(c, X86_FEATURE_HW_CACHE_COHERENCY);

Why not add this to arch/x86/kernel/cpu/scattered.c?

Thanks,
Tom

> +
>   		if (IS_ENABLED(CONFIG_X86_32))
>   			goto clear_all;
>   
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature
  2020-09-10 14:45   ` Tom Lendacky
@ 2020-09-10 19:27     ` Krish Sadhukhan
  2020-09-10 20:40       ` Tom Lendacky
  0 siblings, 1 reply; 9+ messages in thread
From: Krish Sadhukhan @ 2020-09-10 19:27 UTC (permalink / raw)
  To: Tom Lendacky, kvm; +Cc: pbonzini, jmattson


On 9/10/20 7:45 AM, Tom Lendacky wrote:
> On 9/9/20 9:22 PM, Krish Sadhukhan wrote:
>> Some AMD hardware platforms enforce cache coherency across encryption 
>> domains.
>> Add this hardware feature as a CPUID feature to the kernel.
>>
>> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
>> ---
>>   arch/x86/include/asm/cpufeatures.h | 1 +
>>   arch/x86/kernel/cpu/amd.c          | 3 +++
>>   2 files changed, 4 insertions(+)
>>
>> diff --git a/arch/x86/include/asm/cpufeatures.h 
>> b/arch/x86/include/asm/cpufeatures.h
>> index 81335e6fe47d..0e5b27ee5931 100644
>> --- a/arch/x86/include/asm/cpufeatures.h
>> +++ b/arch/x86/include/asm/cpufeatures.h
>> @@ -293,6 +293,7 @@
>>   #define X86_FEATURE_FENCE_SWAPGS_USER    (11*32+ 4) /* "" LFENCE in 
>> user entry SWAPGS path */
>>   #define X86_FEATURE_FENCE_SWAPGS_KERNEL    (11*32+ 5) /* "" LFENCE 
>> in kernel entry SWAPGS path */
>>   #define X86_FEATURE_SPLIT_LOCK_DETECT    (11*32+ 6) /* #AC for 
>> split lock */
>> +#define X86_FEATURE_HW_CACHE_COHERENCY (11*32+ 7) /* AMD 
>> hardware-enforced cache coherency */
>>     /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), 
>> word 12 */
>>   #define X86_FEATURE_AVX512_BF16        (12*32+ 5) /* AVX512 
>> BFLOAT16 instructions */
>> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
>> index 4507ededb978..698884812989 100644
>> --- a/arch/x86/kernel/cpu/amd.c
>> +++ b/arch/x86/kernel/cpu/amd.c
>> @@ -632,6 +632,9 @@ static void early_detect_mem_encrypt(struct 
>> cpuinfo_x86 *c)
>>            */
>>           c->x86_phys_bits -= (cpuid_ebx(CPUID_AMD_SME) >> 6) & 0x3f;
>>   +        if (cpuid_eax(CPUID_AMD_SME) & 0x400)
>> +            set_cpu_cap(c, X86_FEATURE_HW_CACHE_COHERENCY);
>
> Why not add this to arch/x86/kernel/cpu/scattered.c?


The reason why I put it in amd.c is because it's AMD-specific, though I 
know we have SME and SEV in scattered.c. Shouldn't SME and SEV features 
be ideally placed in AMD-specific files and scattered.c be used for 
common CPUID features ?


>
> Thanks,
> Tom
>
>> +
>>           if (IS_ENABLED(CONFIG_X86_32))
>>               goto clear_all;
>>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 3/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherenc
  2020-09-10 14:43   ` Tom Lendacky
@ 2020-09-10 19:29     ` Krish Sadhukhan
  0 siblings, 0 replies; 9+ messages in thread
From: Krish Sadhukhan @ 2020-09-10 19:29 UTC (permalink / raw)
  To: Tom Lendacky, kvm; +Cc: pbonzini, jmattson


On 9/10/20 7:43 AM, Tom Lendacky wrote:
> On 9/9/20 9:22 PM, Krish Sadhukhan wrote:
>> Some hardware implementations may enforce cache coherency across 
>> encryption
>> domains. In such cases, it's not required to flush encrypted pages off
>> cache lines.
>>
>> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
>> ---
>>   arch/x86/kvm/svm/sev.c       | 3 ++-
>>   arch/x86/mm/pat/set_memory.c | 6 ++++--
>>   2 files changed, 6 insertions(+), 3 deletions(-)
>
> You should probably split this patch into two patches, one for the KVM 
> usage and one for the MM usage with appropriate subjects prefixes at 
> that point. Also, you need to then copy the proper people. Did you run 
> these patches through get_maintainer.pl?

I will split it into two patches and copy the relevant people and 
distribution lists.
>
>>
>> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
>> index 402dc4234e39..8aa2209f2637 100644
>> --- a/arch/x86/kvm/svm/sev.c
>> +++ b/arch/x86/kvm/svm/sev.c
>> @@ -384,7 +384,8 @@ static void sev_clflush_pages(struct page 
>> *pages[], unsigned long npages)
>>       uint8_t *page_virtual;
>>       unsigned long i;
>>   -    if (npages == 0 || pages == NULL)
>> +    if (this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY) || npages == 0 ||
>> +        pages == NULL)
>>           return;
>>         for (i = 0; i < npages; i++) {
>> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
>> index d1b2a889f035..5e2c618cbe84 100644
>> --- a/arch/x86/mm/pat/set_memory.c
>> +++ b/arch/x86/mm/pat/set_memory.c
>> @@ -1999,7 +1999,8 @@ static int __set_memory_enc_dec(unsigned long 
>> addr, int numpages, bool enc)
>>       /*
>>        * Before changing the encryption attribute, we need to flush 
>> caches.
>>        */
>> -    cpa_flush(&cpa, 1);
>> +    if (!this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY))
>> +        cpa_flush(&cpa, 1);
>
> This bit is only about cache coherency, so the TLB flush is still 
> needed, so this should be something like:
>
>     cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY));


Agreed. Will fix it.

>
>>         ret = __change_page_attr_set_clr(&cpa, 1);
>>   @@ -2010,7 +2011,8 @@ static int __set_memory_enc_dec(unsigned long 
>> addr, int numpages, bool enc)
>>        * flushing gets optimized in the cpa_flush() path use the same 
>> logic
>>        * as above.
>>        */
>> -    cpa_flush(&cpa, 0);
>> +    if (!this_cpu_has(X86_FEATURE_HW_CACHE_COHERENCY))
>> +        cpa_flush(&cpa, 0);
>
> This should not be changed, still need the call to do the TLB flush.
>
> Thanks,
> Tom
>
>>         return ret;
>>   }
>>

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature
  2020-09-10 19:27     ` Krish Sadhukhan
@ 2020-09-10 20:40       ` Tom Lendacky
  0 siblings, 0 replies; 9+ messages in thread
From: Tom Lendacky @ 2020-09-10 20:40 UTC (permalink / raw)
  To: Krish Sadhukhan, kvm; +Cc: pbonzini, jmattson

On 9/10/20 2:27 PM, Krish Sadhukhan wrote:
> 
> On 9/10/20 7:45 AM, Tom Lendacky wrote:
>> On 9/9/20 9:22 PM, Krish Sadhukhan wrote:
>>> Some AMD hardware platforms enforce cache coherency across encryption 
>>> domains.
>>> Add this hardware feature as a CPUID feature to the kernel.
>>>
>>> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
>>> ---
>>>   arch/x86/include/asm/cpufeatures.h | 1 +
>>>   arch/x86/kernel/cpu/amd.c          | 3 +++
>>>   2 files changed, 4 insertions(+)
>>>
>>> diff --git a/arch/x86/include/asm/cpufeatures.h 
>>> b/arch/x86/include/asm/cpufeatures.h
>>> index 81335e6fe47d..0e5b27ee5931 100644
>>> --- a/arch/x86/include/asm/cpufeatures.h
>>> +++ b/arch/x86/include/asm/cpufeatures.h
>>> @@ -293,6 +293,7 @@
>>>   #define X86_FEATURE_FENCE_SWAPGS_USER    (11*32+ 4) /* "" LFENCE in 
>>> user entry SWAPGS path */
>>>   #define X86_FEATURE_FENCE_SWAPGS_KERNEL    (11*32+ 5) /* "" LFENCE in 
>>> kernel entry SWAPGS path */
>>>   #define X86_FEATURE_SPLIT_LOCK_DETECT    (11*32+ 6) /* #AC for split 
>>> lock */
>>> +#define X86_FEATURE_HW_CACHE_COHERENCY (11*32+ 7) /* AMD 
>>> hardware-enforced cache coherency */
>>>     /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 
>>> 12 */
>>>   #define X86_FEATURE_AVX512_BF16        (12*32+ 5) /* AVX512 BFLOAT16 
>>> instructions */
>>> diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
>>> index 4507ededb978..698884812989 100644
>>> --- a/arch/x86/kernel/cpu/amd.c
>>> +++ b/arch/x86/kernel/cpu/amd.c
>>> @@ -632,6 +632,9 @@ static void early_detect_mem_encrypt(struct 
>>> cpuinfo_x86 *c)
>>>            */
>>>           c->x86_phys_bits -= (cpuid_ebx(CPUID_AMD_SME) >> 6) & 0x3f;
>>>   +        if (cpuid_eax(CPUID_AMD_SME) & 0x400)
>>> +            set_cpu_cap(c, X86_FEATURE_HW_CACHE_COHERENCY);
>>
>> Why not add this to arch/x86/kernel/cpu/scattered.c?
> 
> 
> The reason why I put it in amd.c is because it's AMD-specific, though I 
> know we have SME and SEV in scattered.c. Shouldn't SME and SEV features be 
> ideally placed in AMD-specific files and scattered.c be used for common 
> CPUID features ?

No, it is perfectly fine to put them in scattered.c.

Thanks,
Tom

> 
> 
>>
>> Thanks,
>> Tom
>>
>>> +
>>>           if (IS_ENABLED(CONFIG_X86_32))
>>>               goto clear_all;
>>>

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2020-09-10 21:01 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-10  2:22 [PATCH 0/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherency Krish Sadhukhan
2020-09-10  2:22 ` [PATCH 1/3 v2] KVM: SVM: Replace numeric value for SME CPUID leaf with a #define Krish Sadhukhan
2020-09-10  2:22 ` [PATCH 2/3 v2] KVM: SVM: Add hardware-enforced cache coherency as a CPUID feature Krish Sadhukhan
2020-09-10 14:45   ` Tom Lendacky
2020-09-10 19:27     ` Krish Sadhukhan
2020-09-10 20:40       ` Tom Lendacky
2020-09-10  2:22 ` [PATCH 3/3 v2] KVM: SVM: Don't flush cache of encrypted pages if hardware enforces cache coherenc Krish Sadhukhan
2020-09-10 14:43   ` Tom Lendacky
2020-09-10 19:29     ` Krish Sadhukhan

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).