From: Tom Lendacky <thomas.lendacky@amd.com>
To: Joerg Roedel <joro@8bytes.org>, x86@kernel.org
Cc: Joerg Roedel <jroedel@suse.de>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Andy Lutomirski <luto@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Kees Cook <keescook@chromium.org>,
	Arvind Sankar <nivedita@alum.mit.edu>,
	Martin Radev <martin.b.radev@gmail.com>,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH v4 3/5] x86/boot/compressed/64: Check SEV encryption in 64-bit boot-path
Date: Wed, 28 Oct 2020 12:25:19 -0500	[thread overview]
Message-ID: <a10053a4-be13-a593-2645-47f1723be1f2@amd.com> (raw)
In-Reply-To: <20201028164659.27002-4-joro@8bytes.org>

On 10/28/20 11:46 AM, Joerg Roedel wrote:
> From: Joerg Roedel <jroedel@suse.de>
> 
> Check whether the hypervisor reported the correct C-bit when running as
> an SEV guest. A wrong C-bit position could be abused to leak sensitive
> data from the guest to the hypervisor.
> 
> The check function is in arch/x86/kernel/sev_verify_cbit.S so that it
> can be re-used in the running kernel image.
> 
> Signed-off-by: Joerg Roedel <jroedel@suse.de>

Just one minor comment below, otherwise:

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>

> ---
>  arch/x86/boot/compressed/ident_map_64.c |  1 +
>  arch/x86/boot/compressed/mem_encrypt.S  |  4 ++
>  arch/x86/boot/compressed/misc.h         |  2 +
>  arch/x86/kernel/sev_verify_cbit.S       | 90 +++++++++++++++++++++++++
>  4 files changed, 97 insertions(+)
>  create mode 100644 arch/x86/kernel/sev_verify_cbit.S
> 
> diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
> index a5e5db6ada3c..39b2eded7bc2 100644
> --- a/arch/x86/boot/compressed/ident_map_64.c
> +++ b/arch/x86/boot/compressed/ident_map_64.c
> @@ -164,6 +164,7 @@ void initialize_identity_maps(void *rmode)
>  	add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
>  
>  	/* Load the new page-table. */
> +	sev_verify_cbit(top_level_pgt);
>  	write_cr3(top_level_pgt);
>  }
>  
> diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
> index 0bae1ca658d9..3275dbab085d 100644
> --- a/arch/x86/boot/compressed/mem_encrypt.S
> +++ b/arch/x86/boot/compressed/mem_encrypt.S
> @@ -68,6 +68,9 @@ SYM_FUNC_START(get_sev_encryption_bit)
>  SYM_FUNC_END(get_sev_encryption_bit)
>  
>  	.code64
> +
> +#include "../../kernel/sev_verify_cbit.S"
> +
>  SYM_FUNC_START(set_sev_encryption_mask)
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  	push	%rbp
> @@ -111,4 +114,5 @@ SYM_FUNC_END(set_sev_encryption_mask)
>  	.balign	8
>  SYM_DATA(sme_me_mask,		.quad 0)
>  SYM_DATA(sev_status,		.quad 0)
> +SYM_DATA(sev_check_data,	.quad 0)
>  #endif
> diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
> index 6d31f1b4c4d1..d9a631c5973c 100644
> --- a/arch/x86/boot/compressed/misc.h
> +++ b/arch/x86/boot/compressed/misc.h
> @@ -159,4 +159,6 @@ void boot_page_fault(void);
>  void boot_stage1_vc(void);
>  void boot_stage2_vc(void);
>  
> +unsigned long sev_verify_cbit(unsigned long cr3);
> +
>  #endif /* BOOT_COMPRESSED_MISC_H */
> diff --git a/arch/x86/kernel/sev_verify_cbit.S b/arch/x86/kernel/sev_verify_cbit.S
> new file mode 100644
> index 000000000000..b96f0573f8af
> --- /dev/null
> +++ b/arch/x86/kernel/sev_verify_cbit.S
> @@ -0,0 +1,90 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + *	sev_verify_cbit.S - Code for verification of the C-bit position reported
> + *			    by the Hypervisor when running with SEV enabled.
> + *
> + *	Copyright (c) 2020  Joerg Roedel (jroedel@suse.de)
> + *
> + * Implements sev_verify_cbit() which is called before switching to a new
> + * long-mode page-table at boot.
> + *
> + * It verifies that the C-bit position is correct by writing a random value to
> + * an encrypted memory location while on the current page-table. Then it
> + * switches to the new page-table to verify that the memory content is still
> + * the same. After that it switches back to the current page-table; if the
> + * check succeeds it returns. If the check fails, the code invalidates the
> + * stack pointer and goes into a hlt loop. The stack pointer is invalidated to
> + * make sure no interrupt or exception can get the CPU out of the hlt loop.
> + *
> + * New page-table pointer is expected in %rdi (first parameter)
> + *
> + */
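
As an aside for anyone following along: the flow described above is
roughly equivalent to the following C-level pseudocode (illustrative
only, not part of the patch; the read_cr*/write_cr*/rdrand/halt_forever
helper names are made up):

	static bool sev_verify_cbit_sketch(unsigned long new_cr3)
	{
		unsigned long old_cr3, cr4;
		u64 val;
		bool match;

		/* Nothing to do unless this is an SEV guest */
		if (!sme_me_mask || !sev_status)
			return true;

		/* Clear PGE so global TLB entries don't survive the switch */
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);

		/* Retry until RDRAND delivers a value */
		while (!rdrand(&val))
			;

		/* Write through the current, trusted page-table ... */
		sev_check_data = val;

		/* ... and read back through the page-table under test */
		old_cr3 = read_cr3();
		write_cr3(new_cr3);
		match = (sev_check_data == val);
		write_cr3(old_cr3);
		write_cr4(cr4);

		/* A mismatch means a wrong C-bit - stop all forward progress */
		if (!match)
			halt_forever();

		return true;
	}

The real code of course has to stay in assembly: the new page-table might
not map the stack, so no stack accesses or function calls are possible
around the %cr3 switch.
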
> +SYM_FUNC_START(sev_verify_cbit)
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	/* First check if a C-bit was detected */
> +	movq	sme_me_mask(%rip), %rsi
> +	testq	%rsi, %rsi
> +	jz	3f
> +
> +	/* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */
> +	movq	sev_status(%rip), %rsi
> +	testq	%rsi, %rsi
> +	jz	3f
> +
> +	/* Save CR4 in %rsi */
> +	movq	%cr4, %rsi
> +
> +	/* Disable Global Pages */
> +	movq	%rsi, %rdx
> +	andq	$(~X86_CR4_PGE), %rdx
> +	movq	%rdx, %cr4
> +
> +	/*
> +	 * Now that running under SEV is verified, get a random value using
> +	 * RDRAND. This instruction is mandatory when running as an SEV guest.
> +	 *
> +	 * Don't bail out of the loop if RDRAND returns errors. It is better to
> +	 * prevent forward progress than to work with a non-random value here.
> +	 */
> +1:	rdrand	%rdx
> +	jnc	1b
> +
> +	/* Store value to memory and keep it in %r10 */

This should say "keep it in %rdx"
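I.e. something like:

	/* Store value to memory and keep it in %rdx */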

> +	movq	%rdx, sev_check_data(%rip)
> +
> +	/* Backup current %cr3 value to restore it later */
> +	movq	%cr3, %rcx
> +
> +	/* Switch to new %cr3 - This might unmap the stack */
> +	movq	%rdi, %cr3
> +
> +	/*
> +	 * Compare the value in %rdx with the memory location - if the C-bit is
> +	 * incorrect this reads the encrypted data and makes the check fail.
> +	 */
> +	cmpq	%rdx, sev_check_data(%rip)
> +
> +	/* Restore old %cr3 */
> +	movq	%rcx, %cr3
> +
> +	/* Restore previous CR4 */
> +	movq	%rsi, %cr4
> +
> +	/* Check CMPQ result */
> +	je	3f
> +
> +	/*
> +	 * The check failed - prevent any forward progress to avoid ROP
> +	 * attacks, invalidate the stack and go into a hlt loop.
> +	 */
> +	xorq	%rsp, %rsp
> +	subq	$0x1000, %rsp
> +2:	hlt
> +	jmp 2b
> +3:
> +#endif
> +	/* Return page-table pointer */
> +	movq	%rdi, %rax
> +	ret
> +SYM_FUNC_END(sev_verify_cbit)
> +
> 
