From: Borislav Petkov <bp@alien8.de>
To: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Tom Lendacky <thomas.lendacky@amd.com>,
	linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org,
	linux-kernel@vger.kernel.org, x86@kernel.org,
	iommu@lists.linux-foundation.org, kvm@vger.kernel.org,
	linux-efi@vger.kernel.org, platform-driver-x86@vger.kernel.org,
	linux-graphics-maintainer@vmware.com,
	amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
	kexec@lists.infradead.org, linux-fsdevel@vger.kernel.org,
	Brijesh Singh <brijesh.singh@amd.com>,
	Joerg Roedel <joro@8bytes.org>, Andi Kleen <ak@linux.intel.com>,
	Sathyanarayanan Kuppuswamy 
	<sathyanarayanan.kuppuswamy@linux.intel.com>,
	Tianyu Lan <Tianyu.Lan@microsoft.com>,
	Christoph Hellwig <hch@infradead.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Andy Lutomirski <luto@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Will Deacon <will@kernel.org>
Subject: Re: [PATCH v3 5/8] x86/sme: Replace occurrences of sme_active() with cc_platform_has()
Date: Thu, 23 Sep 2021 20:21:03 +0200	[thread overview]
Message-ID: <YUzFj+yH79XRc3F3@zn.tnic> (raw)
In-Reply-To: <20210922210558.itofvu3725dap5xx@box.shutemov.name>

On Thu, Sep 23, 2021 at 12:05:58AM +0300, Kirill A. Shutemov wrote:
> Unless we find another way to guarantee RIP-relative access, we must use
> fixup_pointer() to access any global variables.

Yah, I've asked the compiler folks about any guarantees we have wrt
RIP-relative addressing, but it doesn't look good. Worst case, we'd have
to do the fixup_pointer() thing.
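
(For reference, the fixup_pointer() idiom being discussed looks roughly
like the helper in arch/x86/kernel/head64.c — the snippet below is a
from-memory sketch, not a verbatim copy, and the sme_me_mask line is
just an illustrative use:)

	/*
	 * Sketch: translate a kernel-virtual pointer into one usable at
	 * the physical address the kernel was actually loaded at, so a
	 * global can be read before paging/relocations are final.
	 */
	static void *fixup_pointer(void *ptr, unsigned long physaddr)
	{
		return ptr - (void *)_text + (void *)physaddr;
	}

	/* e.g. reading sme_me_mask very early: */
	u64 *mask = fixup_pointer(&sme_me_mask, physaddr);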

In the meantime, Tom and I did some more poking at this and here's a
diff on top.

The direction being that we'll stick both the AMD and Intel
*cc_platform_has() calls into cc_platform.c, for which instrumentation
will be disabled, so no issues there.

And that will keep all the querying together in a single file.
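
(And for context, the call-site conversions the series itself does look
roughly like this — an illustrative hunk, not one lifted from the patch:)

	-	if (sme_active())
	+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
			/* ... existing SME-only work ... */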

---
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index a73712b6ee0e..2d4f5c17d79c 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -51,7 +51,6 @@ void __init mem_encrypt_free_decrypted_mem(void);
 void __init mem_encrypt_init(void);
 
 void __init sev_es_init_vc_handling(void);
-bool amd_cc_platform_has(enum cc_attr attr);
 
 #define __bss_decrypted __section(".bss..decrypted")
 
@@ -74,7 +73,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 
 static inline void sev_es_init_vc_handling(void) { }
-static inline bool amd_cc_platform_has(enum cc_attr attr) { return false; }
 
 static inline int __init
 early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
@@ -103,12 +101,6 @@ static inline u64 sme_get_me_mask(void)
 	return sme_me_mask;
 }
 
-#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_ARCH_HAS_CC_PLATFORM)
-bool intel_cc_platform_has(enum cc_attr attr);
-#else
-static inline bool intel_cc_platform_has(enum cc_attr attr) { return false; }
-#endif
-
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/kernel/cc_platform.c b/arch/x86/kernel/cc_platform.c
index da54a1805211..97ede7052f77 100644
--- a/arch/x86/kernel/cc_platform.c
+++ b/arch/x86/kernel/cc_platform.c
@@ -13,6 +13,52 @@
 
 #include <asm/processor.h>
 
+static bool intel_cc_platform_has(enum cc_attr attr)
+{
+#ifdef CONFIG_INTEL_TDX_GUEST
+	return false;
+#else
+	return false;
+#endif
+}
+
+/*
+ * SME and SEV are very similar but they are not the same, so there are
+ * times that the kernel will need to distinguish between SME and SEV. The
+ * cc_platform_has() function is used for this.  When a distinction isn't
+ * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
+ *
+ * The trampoline code is a good example for this requirement.  Before
+ * paging is activated, SME will access all memory as decrypted, but SEV
+ * will access all memory as encrypted.  So, when APs are being brought
+ * up under SME the trampoline area cannot be encrypted, whereas under SEV
+ * the trampoline area must be encrypted.
+ */
+static bool amd_cc_platform_has(enum cc_attr attr)
+{
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	switch (attr) {
+	case CC_ATTR_MEM_ENCRYPT:
+		return sme_me_mask;
+
+	case CC_ATTR_HOST_MEM_ENCRYPT:
+		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);
+
+	case CC_ATTR_GUEST_MEM_ENCRYPT:
+		return sev_status & MSR_AMD64_SEV_ENABLED;
+
+	case CC_ATTR_GUEST_STATE_ENCRYPT:
+		return sev_status & MSR_AMD64_SEV_ES_ENABLED;
+
+	default:
+		return false;
+	}
+#else
+	return false;
+#endif
+}
+
+
 bool cc_platform_has(enum cc_attr attr)
 {
 	if (sme_me_mask)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 53756ff12295..8321c43554a1 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -60,13 +60,6 @@ static u64 msr_test_ctrl_cache __ro_after_init;
  */
 static bool cpu_model_supports_sld __ro_after_init;
 
-#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
-bool intel_cc_platform_has(enum cc_attr attr)
-{
-	return false;
-}
-#endif
-
 /*
  * Processors which have self-snooping capability can handle conflicting
  * memory type across CPUs by snooping its own cache. However, there exists
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9417d404ea92..23d54b810f08 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -361,38 +361,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 	return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-/*
- * SME and SEV are very similar but they are not the same, so there are
- * times that the kernel will need to distinguish between SME and SEV. The
- * cc_platform_has() function is used for this.  When a distinction isn't
- * needed, the CC_ATTR_MEM_ENCRYPT attribute can be used.
- *
- * The trampoline code is a good example for this requirement.  Before
- * paging is activated, SME will access all memory as decrypted, but SEV
- * will access all memory as encrypted.  So, when APs are being brought
- * up under SME the trampoline area cannot be encrypted, whereas under SEV
- * the trampoline area must be encrypted.
- */
-bool amd_cc_platform_has(enum cc_attr attr)
-{
-	switch (attr) {
-	case CC_ATTR_MEM_ENCRYPT:
-		return sme_me_mask;
-
-	case CC_ATTR_HOST_MEM_ENCRYPT:
-		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);
-
-	case CC_ATTR_GUEST_MEM_ENCRYPT:
-		return sev_status & MSR_AMD64_SEV_ENABLED;
-
-	case CC_ATTR_GUEST_STATE_ENCRYPT:
-		return sev_status & MSR_AMD64_SEV_ES_ENABLED;
-
-	default:
-		return false;
-	}
-}
-
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
 {

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
