From: Borislav Petkov <bp@alien8.de>
To: Brijesh Singh <brijesh.singh@amd.com>
Cc: x86@kernel.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, tglx@linutronix.de, jroedel@suse.de,
thomas.lendacky@amd.com, pbonzini@redhat.com, mingo@redhat.com,
dave.hansen@intel.com, rientjes@google.com, seanjc@google.com,
peterz@infradead.org, hpa@zytor.com, tony.luck@intel.com
Subject: Re: [PATCH Part1 RFC v2 14/20] x86/sev: Add helper for validating pages in early enc attribute changes
Date: Wed, 26 May 2021 12:39:08 +0200
Message-ID: <YK4lTP+bHBzUxAOZ@zn.tnic>
In-Reply-To: <20210430121616.2295-15-brijesh.singh@amd.com>
On Fri, Apr 30, 2021 at 07:16:10AM -0500, Brijesh Singh wrote:
> +static void pvalidate_pages(unsigned long vaddr, unsigned int npages, bool validate)
> +{
> + unsigned long vaddr_end;
> + int rc;
> +
> + vaddr = vaddr & PAGE_MASK;
> + vaddr_end = vaddr + (npages << PAGE_SHIFT);
> +
> + while (vaddr < vaddr_end) {
> + rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
> + if (rc) {
> + WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc);
WARN() does return the condition it warned on - look at its definition.
IOW, you can do (and btw e_fail is not needed either):
	rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
	if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
		sev_es_terminate(1, GHCB_TERM_PVALIDATE);
Ditto for the other WARN.
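For reference, the generic WARN() in include/asm-generic/bug.h boils down
to returning the condition it was passed - roughly this (simplified sketch,
arch overrides differ in detail):

	#define WARN(condition, format...) ({			\
		int __ret_warn_on = !!(condition);		\
		if (unlikely(__ret_warn_on))			\
			__WARN_printf(TAINT_WARN, format);	\
		unlikely(__ret_warn_on);			\
	})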
But looking at that function more, you probably could reorganize it a
bit and do this instead:
	...

	while (vaddr < vaddr_end) {
		rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
		if (!rc) {
			vaddr = vaddr + PAGE_SIZE;
			continue;
		}

		WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc);
		sev_es_terminate(1, GHCB_TERM_PVALIDATE);
	}
}
and have the failure case at the end and no labels or return statements
for less clutter.
> + goto e_fail;
> + }
> +
> + vaddr = vaddr + PAGE_SIZE;
> + }
> +
> + return;
> +
> +e_fail:
> + sev_es_terminate(1, GHCB_TERM_PVALIDATE);
> +}
> +
> +static void __init early_snp_set_page_state(unsigned long paddr, unsigned int npages, int op)
> +{
> + unsigned long paddr_end;
> + u64 val;
> +
> + paddr = paddr & PAGE_MASK;
> + paddr_end = paddr + (npages << PAGE_SHIFT);
> +
> + while (paddr < paddr_end) {
> + /*
> + * Use the MSR protcol because this function can be called before the GHCB
WARNING: 'protcol' may be misspelled - perhaps 'protocol'?
#209: FILE: arch/x86/kernel/sev.c:562:
+ * Use the MSR protcol because this function can be called before the GHCB
I think I've said this before:
Please integrate scripts/checkpatch.pl into your patch creation
workflow. Some of the warnings/errors *actually* make sense.
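For example, one way to run it over the top commit of your branch from the
kernel tree:

	./scripts/checkpatch.pl --strict -g HEAD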
> + * is established.
> + */
> + sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
> + VMGEXIT();
> +
> + val = sev_es_rd_ghcb_msr();
> +
> + if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP)
Does that one need a warning too, or am I being too paranoid?
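If so, something along these lines could work (just a sketch, untested):

		if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
			 "Unexpected GHCB MSR PSC response 0x%llx for paddr 0x%lx\n",
			 val, paddr))
			goto e_term;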
> + goto e_term;
> +
> + if (GHCB_MSR_PSC_RESP_VAL(val)) {
> + WARN(1, "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
> + op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared", paddr,
> + GHCB_MSR_PSC_RESP_VAL(val));
> + goto e_term;
		if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
			 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
			 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
			 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
			goto e_term;
> + }
> +
> + paddr = paddr + PAGE_SIZE;
> + }
> +
> + return;
> +
> +e_term:
> + sev_es_terminate(1, GHCB_TERM_PSC);
> +}
> +
> +void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
> + unsigned int npages)
> +{
> + if (!sev_snp_active())
> + return;
> +
> + /* Ask hypervisor to add the memory pages in RMP table as a 'private'. */
> + early_snp_set_page_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);
> +
> + /* Validate the memory pages after its added in the RMP table. */
after they've been added...
> + pvalidate_pages(vaddr, npages, 1);
> +}
> +
> +void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
> + unsigned int npages)
> +{
> + if (!sev_snp_active())
> + return;
> +
> + /* Invalidate memory pages before making it shared in the RMP table. */
Ditto.
> + pvalidate_pages(vaddr, npages, 0);
> +
> + /* Ask hypervisor to make the memory pages shared in the RMP table. */
> + early_snp_set_page_state(paddr, npages, SNP_PAGE_STATE_SHARED);
> +}
> +
> int sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
> {
> u16 startup_cs, startup_ip;
> diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
> index 076d993acba3..f722518b244f 100644
> --- a/arch/x86/mm/mem_encrypt.c
> +++ b/arch/x86/mm/mem_encrypt.c
> @@ -30,6 +30,7 @@
> #include <asm/processor-flags.h>
> #include <asm/msr.h>
> #include <asm/cmdline.h>
> +#include <asm/sev.h>
>
> #include "mm_internal.h"
>
> @@ -50,6 +51,30 @@ bool sev_enabled __section(".data");
> /* Buffer used for early in-place encryption by BSP, no locking needed */
> static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
>
> +/*
> + * When SNP is active, this routine changes the page state from private to
s/this routine changes/change/
> + * shared before copying the data from the source to destination and restore
> + * after the copy. This is required because the source address is mapped as
> + * decrypted by the caller of the routine.
> + */
> +static inline void __init snp_memcpy(void *dst, void *src, size_t sz, unsigned long paddr,
> + bool dec)
decrypt
> +{
> + unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
> +
> + if (dec) {
> + /* If the paddr needs to be accessed decrypted, make the page shared before memcpy. */
> + early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);
> +
> + memcpy(dst, src, sz);
> +
> + /* Restore the page state after the memcpy. */
> + early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
> + } else {
> + memcpy(dst, src, sz);
> + }
Hmm, this function needs reorg. How about this:
/*
 * When SNP is active, change the page state from private to shared before
 * copying the data from the source to destination and restore after the copy.
 * This is required because the source address is mapped as decrypted by the
 * caller of the routine.
 */
static inline void __init snp_memcpy(void *dst, void *src, size_t sz,
				     unsigned long paddr, bool decrypt)
{
	unsigned long npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	if (!sev_snp_active() || !decrypt) {
		memcpy(dst, src, sz);
		return;
	}

	/*
	 * If the paddr needs to be accessed decrypted, mark the page
	 * shared in the RMP table before copying it.
	 */
	early_snp_set_memory_shared((unsigned long)__va(paddr), paddr, npages);

	memcpy(dst, src, sz);

	/* Restore the page state after the memcpy. */
	early_snp_set_memory_private((unsigned long)__va(paddr), paddr, npages);
}
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette