From: "Singh, Brijesh" <brijesh.singh@amd.com>
To: "kvm@vger.kernel.org" <kvm@vger.kernel.org>
Cc: "Singh, Brijesh" <brijesh.singh@amd.com>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"Ingo Molnar" <mingo@redhat.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Joerg Roedel" <joro@8bytes.org>, "Borislav Petkov" <bp@suse.de>,
	"Lendacky, Thomas" <Thomas.Lendacky@amd.com>,
	"x86@kernel.org" <x86@kernel.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>
Subject: [PATCH v3 10/11] mm: x86: Invoke hypercall when page encryption status is changed
Date: Wed, 10 Jul 2019 20:13:11 +0000	[thread overview]
Message-ID: <20190710201244.25195-11-brijesh.singh@amd.com> (raw)
In-Reply-To: <20190710201244.25195-1-brijesh.singh@amd.com>

Invoke a hypercall when a memory region is changed from encrypted ->
decrypted and vice versa. The hypervisor needs to know the page
encryption status of guest memory during live migration.
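
For illustration, a minimal guest-side sketch of the notification
(not part of this patch; the helper name is made up, while
kvm_sev_hypercall3() and KVM_HC_PAGE_ENC_STATUS are introduced by
earlier patches in this series):

  #include <linux/kvm_para.h>
  #include <asm/kvm_para.h>

  /* Hypothetical helper: report one 4K guest page's C-bit state. */
  static void report_page_enc_status(unsigned long pfn, bool enc)
  {
          /*
           * arg0: guest physical address of the page
           * arg1: number of 4K pages covered
           * arg2: true = mapped encrypted, false = mapped decrypted
           */
          kvm_sev_hypercall3(KVM_HC_PAGE_ENC_STATUS,
                             pfn << PAGE_SHIFT, 1, enc);
  }

set_memory_enc_dec_hypercall() below does the same, but walks the
kernel page tables so that 2M and 1G mappings are reported with the
matching 4K page count.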

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Borislav Petkov <bp@suse.de>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/mem_encrypt.h |  3 ++
 arch/x86/mm/mem_encrypt.c          | 45 +++++++++++++++++++++++++++++-
 arch/x86/mm/pageattr.c             | 15 ++++++++++
 3 files changed, 62 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 0c196c47d621..6e654ab5a8e4 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -94,4 +94,7 @@ extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypte
 
 #endif	/* __ASSEMBLY__ */
 
+extern void set_memory_enc_dec_hypercall(unsigned long vaddr,
+					 unsigned long size, bool enc);
+
 #endif	/* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index e0df96fdfe46..f3fda1de2869 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -15,6 +15,7 @@
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/kvm_para.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -25,6 +26,7 @@
 #include <asm/processor-flags.h>
 #include <asm/msr.h>
 #include <asm/cmdline.h>
+#include <asm/kvm_para.h>
 
 #include "mm_internal.h"
 
@@ -192,6 +194,45 @@ void __init sme_early_init(void)
 		swiotlb_force = SWIOTLB_FORCE;
 }
 
+void set_memory_enc_dec_hypercall(unsigned long vaddr, unsigned long sz, bool enc)
+{
+	unsigned long vaddr_end, vaddr_next;
+
+	vaddr_end = vaddr + sz;
+
+	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+		int psize, pmask, level;
+		unsigned long pfn;
+		pte_t *kpte;
+
+		kpte = lookup_address(vaddr, &level);
+		if (!kpte || pte_none(*kpte))
+			return;
+
+		switch (level) {
+		case PG_LEVEL_4K:
+			pfn = pte_pfn(*kpte);
+			break;
+		case PG_LEVEL_2M:
+			pfn = pmd_pfn(*(pmd_t *)kpte);
+			break;
+		case PG_LEVEL_1G:
+			pfn = pud_pfn(*(pud_t *)kpte);
+			break;
+		default:
+			return;
+		}
+
+		psize = page_level_size(level);
+		pmask = page_level_mask(level);
+
+		kvm_sev_hypercall3(KVM_HC_PAGE_ENC_STATUS,
+				   pfn << PAGE_SHIFT, psize >> PAGE_SHIFT, enc);
+
+		vaddr_next = (vaddr & pmask) + psize;
+	}
+}
+
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
 	pgprot_t old_prot, new_prot;
@@ -249,12 +290,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 static int __init early_set_memory_enc_dec(unsigned long vaddr,
 					   unsigned long size, bool enc)
 {
-	unsigned long vaddr_end, vaddr_next;
+	unsigned long vaddr_end, vaddr_next, start;
 	unsigned long psize, pmask;
 	int split_page_size_mask;
 	int level, ret;
 	pte_t *kpte;
 
+	start = vaddr;
 	vaddr_next = vaddr;
 	vaddr_end = vaddr + size;
 
@@ -309,6 +351,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
 	ret = 0;
 
+	set_memory_enc_dec_hypercall(start, size, enc);
 out:
 	__flush_tlb_all();
 	return ret;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 6a9a77a403c9..971f70f58f49 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,6 +26,7 @@
 #include <asm/proto.h>
 #include <asm/pat.h>
 #include <asm/set_memory.h>
+#include <asm/mem_encrypt.h>
 
 #include "mm_internal.h"
 
@@ -2020,6 +2021,12 @@ int set_memory_global(unsigned long addr, int numpages)
 				    __pgprot(_PAGE_GLOBAL), 0);
 }
 
+void __attribute__((weak)) set_memory_enc_dec_hypercall(unsigned long addr,
+							unsigned long size,
+							bool enc)
+{
+}
+
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 {
 	struct cpa_data cpa;
@@ -2060,6 +2067,14 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
+	/*
+	 * When SEV is active, notify the hypervisor that a given memory range
+	 * is mapped encrypted or decrypted. The hypervisor will use this
+	 * information during VM migration.
+	 */
+	if (sev_active())
+		set_memory_enc_dec_hypercall(addr, numpages << PAGE_SHIFT, enc);
+
 	return ret;
 }
 
-- 
2.17.1


Thread overview: 42+ messages
2019-07-10 20:12 [PATCH v3 00/11] Add AMD SEV guest live migration support Singh, Brijesh
2019-07-10 20:13 ` [PATCH v3 01/11] KVM: SVM: Add KVM_SEV SEND_START command Singh, Brijesh
2019-08-22 10:08   ` Borislav Petkov
2019-08-22 13:23     ` Singh, Brijesh
2019-11-12 18:35   ` Peter Gonda
2019-11-12 22:27     ` Brijesh Singh
2019-11-14 19:27       ` Peter Gonda
2019-11-19 14:06         ` Brijesh Singh
2019-07-10 20:13 ` [PATCH v3 02/11] KVM: SVM: Add KVM_SEND_UPDATE_DATA command Singh, Brijesh
2019-08-22 12:02   ` Borislav Petkov
2019-08-22 13:27     ` Singh, Brijesh
2019-08-22 13:34       ` Borislav Petkov
2019-11-12 22:23   ` Peter Gonda
2019-07-10 20:13 ` [PATCH v3 03/11] KVM: SVM: Add KVM_SEV_SEND_FINISH command Singh, Brijesh
2019-08-26 13:29   ` Borislav Petkov
2019-07-10 20:13 ` [PATCH v3 04/11] KVM: SVM: Add support for KVM_SEV_RECEIVE_START command Singh, Brijesh
2019-07-10 20:13 ` [PATCH v3 05/11] KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_DATA command Singh, Brijesh
2019-08-27 12:09   ` Borislav Petkov
2019-11-14 21:02   ` Peter Gonda
2019-07-10 20:13 ` [PATCH v3 06/11] KVM: SVM: Add KVM_SEV_RECEIVE_FINISH command Singh, Brijesh
2019-08-27 12:19   ` Borislav Petkov
2019-07-10 20:13 ` [PATCH v3 07/11] KVM: x86: Add AMD SEV specific Hypercall3 Singh, Brijesh
2019-07-10 20:13 ` [PATCH v3 08/11] KVM: X86: Introduce KVM_HC_PAGE_ENC_STATUS hypercall Singh, Brijesh
2019-07-21 20:57   ` David Rientjes
2019-07-22 17:12     ` Cfir Cohen
2019-07-23 15:31       ` Singh, Brijesh
2019-07-23 15:16     ` Singh, Brijesh
2019-11-25 19:07   ` Peter Gonda
2019-07-10 20:13 ` [PATCH v3 09/11] KVM: x86: Introduce KVM_GET_PAGE_ENC_BITMAP ioctl Singh, Brijesh
2019-08-29 16:26   ` Borislav Petkov
2019-08-29 16:41     ` Singh, Brijesh
2019-08-29 16:56       ` Borislav Petkov
2019-07-10 20:13 ` Singh, Brijesh [this message]
2019-08-29 16:52   ` [PATCH v3 10/11] mm: x86: Invoke hypercall when page encryption status is changed Borislav Petkov
2019-08-29 18:07   ` Borislav Petkov
2019-08-29 18:21     ` Thomas Gleixner
2019-08-29 18:32       ` Thomas Gleixner
2019-07-10 20:13 ` [PATCH v3 11/11] KVM: x86: Introduce KVM_SET_PAGE_ENC_BITMAP ioctl Singh, Brijesh
2019-11-15  1:22   ` Steve Rutherford
2019-11-15  1:39     ` Steve Rutherford
2019-07-12 15:52 ` [PATCH v3 00/11] Add AMD SEV guest live migration support Konrad Rzeszutek Wilk
2019-07-12 16:31   ` Singh, Brijesh
