From: Tom Lendacky <thomas.lendacky@amd.com>
To: linux-kernel@vger.kernel.org, x86@kernel.org,
	linuxppc-dev@lists.ozlabs.org, linux-s390@vger.kernel.org,
	iommu@lists.linux-foundation.org, kvm@vger.kernel.org,
	linux-efi@vger.kernel.org, platform-driver-x86@vger.kernel.org,
	linux-graphics-maintainer@vmware.com,
	amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
	kexec@lists.infradead.org, linux-fsdevel@vger.kernel.org
Cc: Borislav Petkov <bp@alien8.de>,
	Brijesh Singh <brijesh.singh@amd.com>,
	Joerg Roedel <joro@8bytes.org>, Andi Kleen <ak@linux.intel.com>,
	Sathyanarayanan Kuppuswamy 
	<sathyanarayanan.kuppuswamy@linux.intel.com>,
	Tianyu Lan <Tianyu.Lan@microsoft.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>
Subject: [PATCH 06/11] x86/sev: Replace occurrences of sev_es_active() with prot_guest_has()
Date: Tue, 27 Jul 2021 17:26:09 -0500	[thread overview]
Message-ID: <ba565128b88661a656fc3972f01bb2e295158a15.1627424774.git.thomas.lendacky@amd.com> (raw)
In-Reply-To: <cover.1627424773.git.thomas.lendacky@amd.com>

Replace occurrences of sev_es_active() with the more generic
prot_guest_has() using PATTR_GUEST_PROT_STATE, except in
arch/x86/kernel/sev*.c and arch/x86/mm/mem_encrypt*.c, where PATTR_SEV_ES
will be used. If support is added for other memory encryption
technologies in the future, the use of PATTR_GUEST_PROT_STATE can be
updated, as required, to specifically use PATTR_SEV_ES.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/include/asm/mem_encrypt.h | 2 --
 arch/x86/kernel/sev.c              | 6 +++---
 arch/x86/mm/mem_encrypt.c          | 7 +++----
 arch/x86/realmode/init.c           | 3 +--
 4 files changed, 7 insertions(+), 11 deletions(-)
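
For context, below is a minimal, illustrative sketch of how an AMD-specific
attribute check could resolve PATTR_SEV_ES. It is not the implementation
added by patch 02/11 of this series; the example_ prefix is hypothetical,
and the only facts assumed are those visible in the diff below (sev_status,
MSR_AMD64_SEV_ES_ENABLED and the PATTR_* attribute values):

/*
 * Illustrative sketch only, not the helper added earlier in this series.
 * Resolve the generic and AMD-specific "protected guest register state"
 * attributes from the SEV status value cached in sev_status.
 */
static bool example_amd_prot_guest_has(unsigned int attr)
{
	switch (attr) {
	case PATTR_GUEST_PROT_STATE:	/* generic attribute used by common code */
	case PATTR_SEV_ES:		/* AMD-specific form used in sev*.c and mem_encrypt*.c */
		return sev_status & MSR_AMD64_SEV_ES_ENABLED;
	default:
		return false;
	}
}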

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 7e25de37c148..797146e0cd6b 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -50,7 +50,6 @@ void __init mem_encrypt_free_decrypted_mem(void);
 void __init mem_encrypt_init(void);
 
 void __init sev_es_init_vc_handling(void);
-bool sev_es_active(void);
 bool amd_prot_guest_has(unsigned int attr);
 
 #define __bss_decrypted __section(".bss..decrypted")
@@ -74,7 +73,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 
 static inline void sev_es_init_vc_handling(void) { }
-static inline bool sev_es_active(void) { return false; }
 static inline bool amd_prot_guest_has(unsigned int attr) { return false; }
 
 static inline int __init
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index a6895e440bc3..66a4ab9d95d7 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -11,7 +11,7 @@
 
 #include <linux/sched/debug.h>	/* For show_regs() */
 #include <linux/percpu-defs.h>
-#include <linux/mem_encrypt.h>
+#include <linux/protected_guest.h>
 #include <linux/printk.h>
 #include <linux/mm_types.h>
 #include <linux/set_memory.h>
@@ -615,7 +615,7 @@ int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
 	int cpu;
 	u64 pfn;
 
-	if (!sev_es_active())
+	if (!prot_guest_has(PATTR_SEV_ES))
 		return 0;
 
 	pflags = _PAGE_NX | _PAGE_RW;
@@ -774,7 +774,7 @@ void __init sev_es_init_vc_handling(void)
 
 	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
 
-	if (!sev_es_active())
+	if (!prot_guest_has(PATTR_SEV_ES))
 		return;
 
 	if (!sev_es_check_cpu_features())
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index eb5cae93b238..451de8e84fce 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -383,8 +383,7 @@ static bool sme_active(void)
 	return sme_me_mask && !sev_active();
 }
 
-/* Needs to be called from non-instrumentable code */
-bool noinstr sev_es_active(void)
+static bool sev_es_active(void)
 {
 	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
 }
@@ -482,7 +481,7 @@ static void print_mem_encrypt_feature_info(void)
 		pr_cont(" SEV");
 
 	/* Encrypted Register State */
-	if (sev_es_active())
+	if (amd_prot_guest_has(PATTR_SEV_ES))
 		pr_cont(" SEV-ES");
 
 	pr_cont("\n");
@@ -501,7 +500,7 @@ void __init mem_encrypt_init(void)
 	 * With SEV, we need to unroll the rep string I/O instructions,
 	 * but SEV-ES supports them through the #VC handler.
 	 */
-	if (amd_prot_guest_has(PATTR_SEV) && !sev_es_active())
+	if (amd_prot_guest_has(PATTR_SEV) && !amd_prot_guest_has(PATTR_SEV_ES))
 		static_branch_enable(&sev_enable_key);
 
 	print_mem_encrypt_feature_info();
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 2109ae569c67..7711d0071f41 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -2,7 +2,6 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
-#include <linux/mem_encrypt.h>
 #include <linux/protected_guest.h>
 #include <linux/pgtable.h>
 
@@ -48,7 +47,7 @@ static void sme_sev_setup_real_mode(struct trampoline_header *th)
 	if (prot_guest_has(PATTR_HOST_MEM_ENCRYPT))
 		th->flags |= TH_FLAGS_SME_ACTIVE;
 
-	if (sev_es_active()) {
+	if (prot_guest_has(PATTR_GUEST_PROT_STATE)) {
 		/*
 		 * Skip the call to verify_cpu() in secondary_startup_64 as it
 		 * will cause #VC exceptions when the AP can't handle them yet.
-- 
2.32.0


Thread overview: 214+ messages
2021-07-27 22:26 [PATCH 00/11] Implement generic prot_guest_has() helper function Tom Lendacky
2021-07-27 22:26 ` [PATCH 01/11] mm: Introduce a function to check for virtualization protection features Tom Lendacky
2021-07-28 13:17   ` Christoph Hellwig
2021-07-28 16:28     ` Borislav Petkov
2021-08-02 10:34   ` Joerg Roedel
2021-08-11 14:53   ` Kuppuswamy, Sathyanarayanan
2021-08-11 15:39     ` Tom Lendacky
2021-07-27 22:26 ` [PATCH 02/11] x86/sev: Add an x86 version of prot_guest_has() Tom Lendacky
2021-07-28 13:22   ` Christoph Hellwig
2021-07-29 14:24     ` Tom Lendacky
2021-08-02 10:35   ` Joerg Roedel
2021-07-27 22:26 ` [PATCH 03/11] powerpc/pseries/svm: Add a powerpc " Tom Lendacky
2021-07-27 22:26 ` [PATCH 04/11] x86/sme: Replace occurrences of sme_active() with prot_guest_has() Tom Lendacky
2021-08-02 10:37   ` Joerg Roedel
2021-07-27 22:26 ` [PATCH 05/11] x86/sev: Replace occurrences of sev_active() " Tom Lendacky
2021-08-02 10:42   ` Joerg Roedel
2021-07-27 22:26 ` [PATCH 06/11] x86/sev: Replace occurrences of sev_es_active() " Tom Lendacky [this message]
2021-08-02 10:45   ` Joerg Roedel
2021-08-09 21:59     ` Tom Lendacky
2021-08-09 22:08       ` Kuppuswamy, Sathyanarayanan
2021-07-27 22:26 ` [PATCH 07/11] treewide: Replace the use of mem_encrypt_active() " Tom Lendacky
2021-07-30 22:34   ` Sean Christopherson
2021-08-09 21:55     ` Tom Lendacky
2021-08-02 12:42   ` Christophe Leroy
2021-08-09 22:04     ` Tom Lendacky
2021-08-10 18:45   ` Kuppuswamy, Sathyanarayanan
2021-08-10 19:48     ` Tom Lendacky
2021-08-10 20:09       ` Kuppuswamy, Sathyanarayanan
2021-08-11 12:19       ` Kirill A. Shutemov
2021-08-11 15:52         ` Tom Lendacky
2021-08-12 10:07           ` Kirill A. Shutemov
2021-08-13 17:08             ` Tom Lendacky
2021-08-13 20:17               ` Tom Lendacky
2021-07-27 22:26 ` [PATCH 08/11] mm: Remove the now unused mem_encrypt_active() function Tom Lendacky
2021-08-02 10:47   ` Joerg Roedel
2021-07-27 22:26 ` [PATCH 09/11] x86/sev: " Tom Lendacky
2021-08-02 10:46   ` Joerg Roedel
2021-07-27 22:26 ` [PATCH 10/11] powerpc/pseries/svm: " Tom Lendacky
2021-07-27 22:26 ` [PATCH 11/11] s390/mm: " Tom Lendacky
2021-07-27 22:37 ` [PATCH 00/11] Implement generic prot_guest_has() helper function Tom Lendacky
2021-07-28 11:50 ` Christian König
2021-08-09  1:41 ` Kuppuswamy, Sathyanarayanan
2021-08-09 22:16   ` Tom Lendacky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=ba565128b88661a656fc3972f01bb2e295158a15.1627424774.git.thomas.lendacky@amd.com \
    --to=thomas.lendacky@amd.com \
    --cc=Tianyu.Lan@microsoft.com \
    --cc=ak@linux.intel.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=bp@alien8.de \
    --cc=brijesh.singh@amd.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=iommu@lists.linux-foundation.org \
    --cc=joro@8bytes.org \
    --cc=kexec@lists.infradead.org \
    --cc=kvm@vger.kernel.org \
    --cc=linux-efi@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-graphics-maintainer@vmware.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-s390@vger.kernel.org \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=mingo@redhat.com \
    --cc=platform-driver-x86@vger.kernel.org \
    --cc=sathyanarayanan.kuppuswamy@linux.intel.com \
    --cc=tglx@linutronix.de \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html
