* Patch "x86/mm: Prepare sme_encrypt_kernel() for PAGE aligned encryption" has been added to the 4.14-stable tree
@ 2018-01-19 15:38 gregkh
  0 siblings, 0 replies; only message in thread
From: gregkh @ 2018-01-19 15:38 UTC (permalink / raw)
  To: thomas.lendacky, bp, bp, brijesh.singh, gregkh, mingo,
	nix.or.die, peterz, tglx, torvalds
  Cc: stable, stable-commits


This is a note to let you know that I've just added the patch titled

    x86/mm: Prepare sme_encrypt_kernel() for PAGE aligned encryption

to the 4.14-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86-mm-prepare-sme_encrypt_kernel-for-page-aligned-encryption.patch
and it can be found in the queue-4.14 subdirectory.

If you, or anyone else, feel it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.


From cc5f01e28d6c60f274fd1e33b245f679f79f543c Mon Sep 17 00:00:00 2001
From: Tom Lendacky <thomas.lendacky@amd.com>
Date: Wed, 10 Jan 2018 13:26:26 -0600
Subject: x86/mm: Prepare sme_encrypt_kernel() for PAGE aligned encryption

From: Tom Lendacky <thomas.lendacky@amd.com>

commit cc5f01e28d6c60f274fd1e33b245f679f79f543c upstream.

In preparation for encrypting more than just the kernel, the encryption
support in sme_encrypt_kernel() needs to handle 4KB page aligned
encryption, not just 2MB large page aligned encryption.

Update the routines that populate the PGD to support non-2MB-aligned
addresses.  This is done by creating PTE page tables for the start and
end portions of the address range that fall outside of 2MB alignment.
This results in, at most, two extra pages to hold the PTE entries for
each mapped range.
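
For illustration only (this sketch is not part of the patch): the
head/body/tail split that __sme_map_range() performs in the diff below
can be modeled with plain alignment arithmetic.  PMD_SIZE, ALIGN_UP(),
ALIGN_DOWN() and the example addresses here are hypothetical stand-ins
for the kernel's PMD_PAGE_SIZE, ALIGN() and PMD_PAGE_MASK:

    #include <stdio.h>
    #include <inttypes.h>

    /* Stand-in for the kernel's 2MB PMD_PAGE_SIZE. */
    #define PMD_SIZE          (2ULL * 1024 * 1024)
    #define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))
    #define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))

    int main(void)
    {
            /* Hypothetical, non-2MB-aligned range to be mapped. */
            uint64_t vaddr = 0x1ff000, vaddr_end = 0x5c3000;

            /* Head: 4KB PTE mappings up to the first 2MB boundary. */
            uint64_t head_end = ALIGN_UP(vaddr, PMD_SIZE);
            /* Body: 2MB PMD mappings covering the aligned middle. */
            uint64_t body_end = ALIGN_DOWN(vaddr_end, PMD_SIZE);

            printf("PTE head: 0x%" PRIx64 " - 0x%" PRIx64 "\n", vaddr, head_end);
            printf("PMD body: 0x%" PRIx64 " - 0x%" PRIx64 "\n", head_end, body_end);
            printf("PTE tail: 0x%" PRIx64 " - 0x%" PRIx64 "\n", body_end, vaddr_end);

            return 0;
    }

Either the head or the tail range can be empty when the corresponding
boundary is already 2MB aligned; the per-PTE and per-PMD mapping loops
then simply do no work.  At most two extra PTE pages are needed per
mapped range, which is what the pte_size term added to
sme_pgtable_calc() below accounts for.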

Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180110192626.6026.75387.stgit@tlendack-t1.amdoffice.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/mm/mem_encrypt.c      |  123 +++++++++++++++++++++++++++++++++++------
 arch/x86/mm/mem_encrypt_boot.S |   20 ++++--
 2 files changed, 121 insertions(+), 22 deletions(-)

--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -218,6 +218,7 @@ struct sme_populate_pgd_data {
 	pgd_t	*pgd;
 
 	pmdval_t pmd_flags;
+	pteval_t pte_flags;
 	unsigned long paddr;
 
 	unsigned long vaddr;
@@ -242,6 +243,7 @@ static void __init sme_clear_pgd(struct
 #define PGD_FLAGS		_KERNPG_TABLE_NOENC
 #define P4D_FLAGS		_KERNPG_TABLE_NOENC
 #define PUD_FLAGS		_KERNPG_TABLE_NOENC
+#define PMD_FLAGS		_KERNPG_TABLE_NOENC
 
 #define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
 
@@ -251,7 +253,15 @@ static void __init sme_clear_pgd(struct
 
 #define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
 
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
+
+#define PTE_FLAGS_DEC		PTE_FLAGS
+#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+				 (_PAGE_PAT | _PAGE_PWT))
+
+#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
+
+static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pgd_t *pgd_p;
 	p4d_t *p4d_p;
@@ -302,7 +312,7 @@ static void __init sme_populate_pgd_larg
 	pud_p += pud_index(ppd->vaddr);
 	if (native_pud_val(*pud_p)) {
 		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			return;
+			return NULL;
 
 		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
 	} else {
@@ -316,16 +326,55 @@ static void __init sme_populate_pgd_larg
 		native_set_pud(pud_p, pud);
 	}
 
+	return pmd_p;
+}
+
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+{
+	pmd_t *pmd_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
 	pmd_p += pmd_index(ppd->vaddr);
 	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
 		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
 }
 
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
-				   pmdval_t pmd_flags)
+static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 {
-	ppd->pmd_flags = pmd_flags;
+	pmd_t *pmd_p;
+	pte_t *pte_p;
 
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
+	pmd_p += pmd_index(ppd->vaddr);
+	if (native_pmd_val(*pmd_p)) {
+		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
+			return;
+
+		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
+	} else {
+		pmd_t pmd;
+
+		pte_p = ppd->pgtable_area;
+		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
+
+		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
+		native_set_pmd(pmd_p, pmd);
+	}
+
+	pte_p += pte_index(ppd->vaddr);
+	if (!native_pte_val(*pte_p))
+		native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
+}
+
+static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+{
 	while (ppd->vaddr < ppd->vaddr_end) {
 		sme_populate_pgd_large(ppd);
 
@@ -334,33 +383,71 @@ static void __init __sme_map_range(struc
 	}
 }
 
+static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+{
+	while (ppd->vaddr < ppd->vaddr_end) {
+		sme_populate_pgd(ppd);
+
+		ppd->vaddr += PAGE_SIZE;
+		ppd->paddr += PAGE_SIZE;
+	}
+}
+
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+				   pmdval_t pmd_flags, pteval_t pte_flags)
+{
+	unsigned long vaddr_end;
+
+	ppd->pmd_flags = pmd_flags;
+	ppd->pte_flags = pte_flags;
+
+	/* Save original end value since we modify the struct value */
+	vaddr_end = ppd->vaddr_end;
+
+	/* If start is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
+	__sme_map_range_pte(ppd);
+
+	/* Create PMD entries */
+	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
+	__sme_map_range_pmd(ppd);
+
+	/* If end is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = vaddr_end;
+	__sme_map_range_pte(ppd);
+}
+
 static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_ENC);
+	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
 }
 
 static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_DEC);
+	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
 }
 
 static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_DEC_WP);
+	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
-	unsigned long p4d_size, pud_size, pmd_size;
+	unsigned long p4d_size, pud_size, pmd_size, pte_size;
 	unsigned long total;
 
 	/*
 	 * Perform a relatively simplistic calculation of the pagetable
-	 * entries that are needed. That mappings will be covered by 2MB
-	 * PMD entries so we can conservatively calculate the required
+	 * entries that are needed. Those mappings will be covered mostly
+	 * by 2MB PMD entries so we can conservatively calculate the required
 	 * number of P4D, PUD and PMD structures needed to perform the
-	 * mappings. Incrementing the count for each covers the case where
-	 * the addresses cross entries.
+	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
+	 * would be needed for the start and end portion of the address range
+	 * that fall outside of the 2MB alignment.  This results in, at most,
+	 * two extra pages to hold PTE entries for each range that is mapped.
+	 * Incrementing the count for each covers the case where the addresses
+	 * cross entries.
 	 */
 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
 		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
@@ -374,8 +461,9 @@ static unsigned long __init sme_pgtable_
 	}
 	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
 	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
-	total = p4d_size + pud_size + pmd_size;
+	total = p4d_size + pud_size + pmd_size + pte_size;
 
 	/*
 	 * Now calculate the added pagetable structures needed to populate
@@ -458,10 +546,13 @@ void __init sme_encrypt_kernel(void)
 
 	/*
 	 * The total workarea includes the executable encryption area and
-	 * the pagetable area.
+	 * the pagetable area. The start of the workarea is already 2MB
+	 * aligned, align the end of the workarea on a 2MB boundary so that
+	 * we don't try to create/allocate PTE entries from the workarea
+	 * before it is mapped.
 	 */
 	workarea_len = execute_len + pgtable_area_len;
-	workarea_end = workarea_start + workarea_len;
+	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
 
 	/*
 	 * Set the address to the start of where newly created pagetable
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -104,6 +104,7 @@ ENTRY(__enc_copy)
 	mov	%rdx, %cr4
 
 	push	%r15
+	push	%r12
 
 	movq	%rcx, %r9		/* Save kernel length */
 	movq	%rdi, %r10		/* Save encrypted kernel address */
@@ -119,21 +120,27 @@ ENTRY(__enc_copy)
 
 	wbinvd				/* Invalidate any cache entries */
 
-	/* Copy/encrypt 2MB at a time */
+	/* Copy/encrypt up to 2MB at a time */
+	movq	$PMD_PAGE_SIZE, %r12
 1:
+	cmpq	%r12, %r9
+	jnb	2f
+	movq	%r9, %r12
+
+2:
 	movq	%r11, %rsi		/* Source - decrypted kernel */
 	movq	%r8, %rdi		/* Dest   - intermediate copy buffer */
-	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
+	movq	%r12, %rcx
 	rep	movsb
 
 	movq	%r8, %rsi		/* Source - intermediate copy buffer */
 	movq	%r10, %rdi		/* Dest   - encrypted kernel */
-	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
+	movq	%r12, %rcx
 	rep	movsb
 
-	addq	$PMD_PAGE_SIZE, %r11
-	addq	$PMD_PAGE_SIZE, %r10
-	subq	$PMD_PAGE_SIZE, %r9	/* Kernel length decrement */
+	addq	%r12, %r11
+	addq	%r12, %r10
+	subq	%r12, %r9		/* Kernel length decrement */
 	jnz	1b			/* Kernel length not zero? */
 
 	/* Restore PAT register */
@@ -142,6 +149,7 @@ ENTRY(__enc_copy)
 	mov	%r15, %rdx		/* Restore original PAT value */
 	wrmsr
 
+	pop	%r12
 	pop	%r15
 
 	ret


Patches currently in stable-queue which might be from thomas.lendacky@amd.com are

queue-4.14/x86-mm-clean-up-register-saving-in-the-__enc_copy-assembly-code.patch
queue-4.14/x86-mm-use-a-struct-to-reduce-parameters-for-sme-pgd-mapping.patch
queue-4.14/x86-mm-centralize-pmd-flags-in-sme_encrypt_kernel.patch
queue-4.14/x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch
queue-4.14/x86-mm-prepare-sme_encrypt_kernel-for-page-aligned-encryption.patch
queue-4.14/x86-retpoline-add-lfence-to-the-retpoline-rsb-filling-rsb-macros.patch
