From: Tom Lendacky <thomas.lendacky@amd.com>
To: <linux-arch@vger.kernel.org>, <linux-efi@vger.kernel.org>,
	<kvm@vger.kernel.org>, <linux-doc@vger.kernel.org>,
	<x86@kernel.org>, <linux-kernel@vger.kernel.org>,
	<kasan-dev@googlegroups.com>, <linux-mm@kvack.org>,
	<iommu@lists.linux-foundation.org>
Cc: "Rik van Riel" <riel@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Toshimitsu Kani" <toshi.kani@hpe.com>,
	"Arnd Bergmann" <arnd@arndb.de>,
	"Jonathan Corbet" <corbet@lwn.net>,
	"Matt Fleming" <matt@codeblueprint.co.uk>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Joerg Roedel" <joro@8bytes.org>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Brijesh Singh" <brijesh.singh@amd.com>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Alexander Potapenko" <glider@google.com>,
	"Andy Lutomirski" <luto@kernel.org>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Borislav Petkov" <bp@alien8.de>,
	"Andrey Ryabinin" <aryabinin@virtuozzo.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Larry Woodman" <lwoodman@redhat.com>,
	"Dmitry Vyukov" <dvyukov@google.com>
Subject: [RFC PATCH v4 10/28] x86: Ensure that boot memory areas are mapped properly
Date: Thu, 16 Feb 2017 09:44:11 -0600	[thread overview]
Message-ID: <20170216154411.19244.99258.stgit@tlendack-t1.amdoffice.net> (raw)
In-Reply-To: <20170216154158.19244.66630.stgit@tlendack-t1.amdoffice.net>

The boot data and command line data are present in memory in a decrypted
state and are copied early in the boot process.  The early page fault
support will map these areas as encrypted, so before attempting to copy
them, add decrypted mappings so the data is accessed properly when copied.
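
In rough terms, copy_bootdata() ends up doing the following (a condensed
sketch of the head64.c hunk below; sme_map_bootdata() and
sme_unmap_bootdata() are no-ops when SME is not active):

	sme_map_bootdata(real_mode_data);	/* map boot data decrypted */
	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	/* ... copy the command line through the same decrypted mapping ... */
	sme_unmap_bootdata(real_mode_data);	/* clear the decrypted mappings */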

For the initrd, encrypt the data in place. Since the initrd area will later
be mapped as encrypted, the data will then be accessed properly.
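
Concretely, reserve_initrd() gains the following early in the function
(condensed from the setup.c hunk below):

	if (sme_active())
		sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);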

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
---
 arch/x86/include/asm/mem_encrypt.h |   11 +++++
 arch/x86/kernel/head64.c           |   34 +++++++++++++++--
 arch/x86/kernel/setup.c            |   10 +++++
 arch/x86/mm/mem_encrypt.c          |   74 ++++++++++++++++++++++++++++++++++++
 4 files changed, 126 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 3c9052c..e2b7364 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -31,6 +31,9 @@ void __init sme_early_encrypt(resource_size_t paddr,
 void __init sme_early_decrypt(resource_size_t paddr,
 			      unsigned long size);
 
+void __init sme_map_bootdata(char *real_mode_data);
+void __init sme_unmap_bootdata(char *real_mode_data);
+
 void __init sme_early_init(void);
 
 #define __sme_pa(x)		(__pa((x)) | sme_me_mask)
@@ -57,6 +60,14 @@ static inline void __init sme_early_decrypt(resource_size_t paddr,
 {
 }
 
+static inline void __init sme_map_bootdata(char *real_mode_data)
+{
+}
+
+static inline void __init sme_unmap_bootdata(char *real_mode_data)
+{
+}
+
 static inline void __init sme_early_init(void)
 {
 }
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 182a4c7..03f8e74 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -46,13 +46,18 @@ static void __init reset_early_page_tables(void)
 	write_cr3(__sme_pa_nodebug(early_level4_pgt));
 }
 
+void __init __early_pgtable_flush(void)
+{
+	write_cr3(__sme_pa_nodebug(early_level4_pgt));
+}
+
 /* Create a new PMD entry */
-int __init early_make_pgtable(unsigned long address)
+int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
 {
 	unsigned long physaddr = address - __PAGE_OFFSET;
 	pgdval_t pgd, *pgd_p;
 	pudval_t pud, *pud_p;
-	pmdval_t pmd, *pmd_p;
+	pmdval_t *pmd_p;
 
 	/* Invalid address or early pgt is done ?  */
 	if (physaddr >= MAXMEM || read_cr3() != __sme_pa_nodebug(early_level4_pgt))
@@ -94,12 +99,21 @@ int __init early_make_pgtable(unsigned long address)
 		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
 		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
 	}
-	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
 	pmd_p[pmd_index(address)] = pmd;
 
 	return 0;
 }
 
+int __init early_make_pgtable(unsigned long address)
+{
+	unsigned long physaddr = address - __PAGE_OFFSET;
+	pmdval_t pmd;
+
+	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
+
+	return __early_make_pgtable(address, pmd);
+}
+
 /* Don't add a printk in there. printk relies on the PDA which is not initialized 
    yet. */
 static void __init clear_bss(void)
@@ -122,6 +136,12 @@ static void __init copy_bootdata(char *real_mode_data)
 	char * command_line;
 	unsigned long cmd_line_ptr;
 
+	/*
+	 * If SME is active, this will create decrypted mappings of the
+	 * boot data in advance of the copy operations.
+	 */
+	sme_map_bootdata(real_mode_data);
+
 	memcpy(&boot_params, real_mode_data, sizeof boot_params);
 	sanitize_boot_params(&boot_params);
 	cmd_line_ptr = get_cmd_line_ptr();
@@ -129,6 +149,14 @@ static void __init copy_bootdata(char *real_mode_data)
 		command_line = __va(cmd_line_ptr);
 		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 	}
+
+	/*
+	 * The old boot data is no longer needed and won't be reserved,
+	 * freeing up that memory for use by the system. If SME is active,
+	 * we need to remove the mappings that were created so that the
+	 * memory doesn't remain mapped as decrypted.
+	 */
+	sme_unmap_bootdata(real_mode_data);
 }
 
 asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cab13f7..bd5b9a7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -114,6 +114,7 @@
 #include <asm/microcode.h>
 #include <asm/mmu_context.h>
 #include <asm/kaslr.h>
+#include <asm/mem_encrypt.h>
 
 /*
  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -376,6 +377,15 @@ static void __init reserve_initrd(void)
 	    !ramdisk_image || !ramdisk_size)
 		return;		/* No initrd provided by bootloader */
 
+	/*
+	 * If SME is active, this memory will be marked encrypted by the
+	 * kernel when it is accessed (including relocation). However, the
+	 * ramdisk image was loaded decrypted by the bootloader, so make
+	 * sure that it is encrypted before accessing it.
+	 */
+	if (sme_active())
+		sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
+
 	initrd_start = 0;
 
 	mapped_size = memblock_mem_size(max_pfn_mapped);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index ac3565c..ec548e9 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -16,8 +16,12 @@
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
+#include <asm/setup.h>
+#include <asm/bootparam.h>
 
 extern pmdval_t early_pmd_flags;
+int __init __early_make_pgtable(unsigned long, pmdval_t);
+void __init __early_pgtable_flush(void);
 
 /*
  * Since SME related variables are set early in the boot process they must
@@ -103,6 +107,76 @@ void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
 	__sme_early_enc_dec(paddr, size, false);
 }
 
+static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
+					     bool map)
+{
+	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
+	pmdval_t pmd_flags, pmd;
+
+	/* Use early_pmd_flags but remove the encryption mask */
+	pmd_flags = early_pmd_flags & ~sme_me_mask;
+
+	do {
+		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
+		__early_make_pgtable((unsigned long)vaddr, pmd);
+
+		vaddr += PMD_SIZE;
+		paddr += PMD_SIZE;
+		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
+	} while (size);
+}
+
+static void __init __sme_map_unmap_bootdata(char *real_mode_data, bool map)
+{
+	struct boot_params *boot_data;
+	unsigned long cmdline_paddr;
+
+	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), map);
+	boot_data = (struct boot_params *)real_mode_data;
+
+	/*
+	 * Determine the command line address only after having established
+	 * the decrypted mapping.
+	 */
+	cmdline_paddr = boot_data->hdr.cmd_line_ptr |
+			((u64)boot_data->ext_cmd_line_ptr << 32);
+
+	if (cmdline_paddr)
+		__sme_early_map_unmap_mem(__va(cmdline_paddr),
+					  COMMAND_LINE_SIZE, map);
+}
+
+void __init sme_unmap_bootdata(char *real_mode_data)
+{
+	/* If SME is not active, the bootdata is in the correct state */
+	if (!sme_active())
+		return;
+
+	/*
+	 * The bootdata and command line aren't needed anymore so clear
+	 * any mapping of them.
+	 */
+	__sme_map_unmap_bootdata(real_mode_data, false);
+
+	__early_pgtable_flush();
+}
+
+void __init sme_map_bootdata(char *real_mode_data)
+{
+	/* If SME is not active, the bootdata is in the correct state */
+	if (!sme_active())
+		return;
+
+	/*
+	 * The bootdata and command line will not be encrypted, so they
+	 * need to be mapped as decrypted memory so they can be copied
+	 * properly.
+	 */
+	__sme_map_unmap_bootdata(real_mode_data, true);
+
+	__early_pgtable_flush();
+}
+
 void __init sme_early_init(void)
 {
 	unsigned int i;

2017-02-16 15:48 ` [RFC PATCH v4 27/28] x86: Add support to encrypt the kernel in-place Tom Lendacky
2017-02-16 15:48   ` Tom Lendacky
2017-02-16 15:48   ` Tom Lendacky
2017-02-16 15:48   ` Tom Lendacky
2017-03-01 17:36   ` Borislav Petkov
2017-03-01 17:36     ` Borislav Petkov
2017-03-02 18:30     ` Tom Lendacky
2017-03-02 18:30       ` Tom Lendacky
2017-03-02 18:30       ` Tom Lendacky
2017-03-02 18:51       ` Borislav Petkov
2017-03-02 18:51         ` Borislav Petkov
2017-03-02 18:51         ` Borislav Petkov
2017-02-16 15:48 ` [RFC PATCH v4 28/28] x86: Add support to make use of Secure Memory Encryption Tom Lendacky
2017-02-16 15:48   ` Tom Lendacky
2017-02-16 15:48   ` Tom Lendacky
2017-02-16 15:48   ` Tom Lendacky
2017-03-01 18:40   ` Borislav Petkov
2017-03-01 18:40     ` Borislav Petkov
2017-03-01 18:40     ` Borislav Petkov
2017-03-07 16:05     ` Tom Lendacky
2017-03-07 16:05       ` Tom Lendacky
2017-03-07 16:05       ` Tom Lendacky
2017-03-07 17:42       ` Borislav Petkov
2017-03-07 17:42         ` Borislav Petkov
2017-03-07 17:42         ` Borislav Petkov
2017-03-08 15:05       ` Borislav Petkov
2017-03-08 15:05         ` Borislav Petkov
2017-03-08 15:05         ` Borislav Petkov
2017-02-18 18:12 ` [RFC PATCH v4 00/28] x86: Secure Memory Encryption (AMD) Borislav Petkov
2017-02-18 18:12   ` Borislav Petkov
2017-02-21 15:09   ` Tom Lendacky
2017-02-21 15:09     ` Tom Lendacky
2017-02-21 15:09     ` Tom Lendacky
2017-02-21 17:42   ` Rik van Riel
2017-02-21 17:42     ` Rik van Riel
2017-02-21 17:53     ` Borislav Petkov
2017-02-21 17:53       ` Borislav Petkov
2017-03-01  9:17 ` Dave Young
2017-03-01  9:17   ` Dave Young
2017-03-01  9:17   ` Dave Young
2017-03-01  9:17   ` Dave Young
2017-03-01 17:51   ` Tom Lendacky
2017-03-01 17:51     ` Tom Lendacky
2017-03-01 17:51     ` Tom Lendacky
2017-03-01 17:51     ` Tom Lendacky

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save this message as an mbox file, import it into your mail client,
  and reply-to-all from there.

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
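
  For example, a minimal sketch (not part of the original instructions;
  mutt is just one client that can open an mbox directly, and the file
  name below is a placeholder):

    mutt -f /path/to/message.mbox
    # inside mutt, press 'g' (group-reply) to reply to all recipients,
    # then quote the message interleaved style as suggested above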

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170216154411.19244.99258.stgit@tlendack-t1.amdoffice.net \
    --to=thomas.lendacky@amd.com \
    --cc=arnd@arndb.de \
    --cc=aryabinin@virtuozzo.com \
    --cc=bp@alien8.de \
    --cc=brijesh.singh@amd.com \
    --cc=corbet@lwn.net \
    --cc=dvyukov@google.com \
    --cc=glider@google.com \
    --cc=hpa@zytor.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=joro@8bytes.org \
    --cc=kasan-dev@googlegroups.com \
    --cc=konrad.wilk@oracle.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-efi@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=luto@kernel.org \
    --cc=lwoodman@redhat.com \
    --cc=matt@codeblueprint.co.uk \
    --cc=mingo@redhat.com \
    --cc=mst@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=riel@redhat.com \
    --cc=rkrcmar@redhat.com \
    --cc=tglx@linutronix.de \
    --cc=toshi.kani@hpe.com \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html
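
  If you want to check the recipient list and the In-Reply-To threading
  before anything goes out, git-send-email(1) also accepts --dry-run,
  which prints what would be sent without sending it.  A minimal sketch
  (reply.txt is a placeholder for your plain-text reply):

    git send-email --dry-run \
      --in-reply-to=20170216154411.19244.99258.stgit@tlendack-t1.amdoffice.net \
      --to=thomas.lendacky@amd.com \
      reply.txt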

* If your mail client supports setting the In-Reply-To header
  via mailto: links, use the mailto: link provided on the archive page.
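
  A hand-built illustration of such a link (treat it as a sketch rather
  than the archive's own link; client support for passing the
  In-Reply-To header through mailto: varies), with the angle brackets
  and '@' of the Message-ID percent-encoded:

    mailto:thomas.lendacky@amd.com?In-Reply-To=%3C20170216154411.19244.99258.stgit%40tlendack-t1.amdoffice.net%3E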

Be sure your reply has a Subject: header at the top and a blank line
before the message body.