From: Brijesh Singh <brijesh.singh@amd.com>
To: <simon.guinot@sequanux.org>, <linux-efi@vger.kernel.org>,
	<brijesh.singh@amd.com>, <kvm@vger.kernel.org>,
	<rkrcmar@redhat.com>, <matt@codeblueprint.co.uk>,
	<linus.walleij@linaro.org>, <linux-mm@kvack.org>,
	<paul.gortmaker@windriver.com>, <hpa@zytor.com>,
	<dan.j.williams@intel.com>, <aarcange@redhat.com>,
	<sfr@canb.auug.org.au>, <andriy.shevchenko@linux.intel.com>,
	<herbert@gondor.apana.org.au>, <bhe@redhat.com>,
	<xemul@parallels.com>, <joro@8bytes.org>, <x86@kernel.org>,
	<mingo@redhat.com>, <msalter@redhat.com>,
	<ross.zwisler@linux.intel.com>, <bp@suse.de>, <dyoung@redhat.com>,
	<thomas.lendacky@amd.com>, <jroedel@suse.de>,
	<keescook@chromium.org>, <toshi.kani@hpe.com>,
	<mathieu.desnoyers@efficios.com>, <devel@linuxdriverproject.org>,
	<tglx@linutronix.de>, <mchehab@kernel.org>,
	<iamjoonsoo.kim@lge.com>, <labbott@fedoraproject.org>,
	<tony.luck@intel.com>, <alexandre.bounine@idt.com>,
	<kuleshovmail@gmail.com>, <linux-kernel@vger.kernel.org>,
	<mcgrof@kernel.org>, <linux-crypto@vger.kernel.org>,
	<pbonzini@redhat.com>, <akpm@linux-foundation.org>,
	<davem@davemloft.net>
Subject: [RFC PATCH v1 20/28] KVM: SVM: prepare for SEV guest management API support
Date: Mon, 22 Aug 2016 19:28:02 -0400	[thread overview]
Message-ID: <147190848221.9523.931142742439444357.stgit@brijesh-build-machine> (raw)
In-Reply-To: <147190820782.9523.4967724730957229273.stgit@brijesh-build-machine>

This patch adds the initial support required for the Secure Encrypted
Virtualization (SEV) guest management APIs.

ASID management:
 - Reserve an ASID range for SEV guests. The SEV ASID range is obtained
   through CPUID Fn8000_001F[ECX]; a non-SEV guest can use any ASID
   outside that range.
 - An SEV guest must use an ASID from the range obtained through CPUID.
 - All vCPUs of an SEV guest must share the same ASID. A TLB flush is
   required whenever a different VMCB using the same ASID is to be run
   on the same host CPU (a simplified sketch of this scheme follows the
   list).
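
Below is a simplified, self-contained sketch of the ASID reservation and
TLB-flush decision described above. All names used here, such as
MAX_SEV_ASID, sev_asids and sev_asid_alloc(), are illustrative
placeholders rather than the symbols added by this patch; the real logic
lives in sev_asid_new() and pre_sev_run() in the diff below.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define MAX_SEV_ASID	15	/* illustrative; the real limit comes from CPUID */

/* ASIDs 1..MAX_SEV_ASID are reserved for SEV guests; ASID 0 is never handed out. */
static DECLARE_BITMAP(sev_asids, MAX_SEV_ASID + 1);

/* One ASID is allocated per SEV guest and shared by all of its vCPUs. */
static int sev_asid_alloc(void)
{
	unsigned int asid;

	asid = find_next_zero_bit(sev_asids, MAX_SEV_ASID + 1, 1);
	if (asid > MAX_SEV_ASID)
		return -EBUSY;

	set_bit(asid, sev_asids);
	return asid;
}

/*
 * A TLB flush is needed before VMRUN when a different VMCB was the last
 * user of this ASID on this CPU, or when this VMCB last ran on another CPU.
 */
static bool sev_needs_tlb_flush(const void *last_vmcb_for_asid, const void *vmcb,
				unsigned int last_cpu, unsigned int cpu)
{
	return last_vmcb_for_asid != vmcb || last_cpu != cpu;
}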

 - Save the SEV private structure (ASID, firmware handle, vcpu reference
   count) in kvm_arch.

 - If SEV is available, initialize the PSP firmware during hardware setup
   (a minimal sketch of the init/shutdown pairing follows below).
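
A minimal sketch of that setup-time initialization and module-unload
shutdown pairing, using the psp_platform_init()/psp_platform_shutdown()
interface from the proposed PSP driver (linux/ccp-psp.h) that this patch
builds on. The wrapper function names and the error handling shown here
are illustrative only:

#include <linux/ccp-psp.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Bring up the PSP firmware once, when SVM hardware setup runs. */
static int example_psp_init(void)
{
	struct psp_data_init *init;
	int rc, psp_ret;

	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!init)
		return -ENOMEM;

	/* kzalloc() already left init->flags at 0 */
	init->hdr.buffer_len = sizeof(*init);
	rc = psp_platform_init(init, &psp_ret);
	if (rc)
		pr_err("SEV: PSP_INIT failed, rc=%d psp_ret=%#x\n", rc, psp_ret);

	kfree(init);
	return rc;
}

/* Tear the PSP firmware down again when the module is unloaded. */
static void example_psp_shutdown(void)
{
	int psp_ret;

	if (psp_platform_shutdown(&psp_ret))
		pr_err("SEV: PSP shutdown failed, psp_ret=%#x\n", psp_ret);
}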

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/include/asm/kvm_host.h |    9 ++
 arch/x86/kvm/svm.c              |  213 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 221 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b1dd673..9b885fc 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -715,6 +715,12 @@ struct kvm_hv {
 	u64 hv_crash_ctl;
 };
 
+struct kvm_sev_info {
+	unsigned int asid;	/* asid for this guest */
+	unsigned int handle;	/* firmware handle */
+	unsigned int ref_count; /* number of active vcpus */
+};
+
 struct kvm_arch {
 	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
@@ -799,6 +805,9 @@ struct kvm_arch {
 
 	bool x2apic_format;
 	bool x2apic_broadcast_quirk_disabled;
+
+	/* struct for SEV guest */
+	struct kvm_sev_info sev_info;
 };
 
 struct kvm_vm_stat {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f010b23..dcee635 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -34,6 +34,7 @@
 #include <linux/sched.h>
 #include <linux/trace_events.h>
 #include <linux/slab.h>
+#include <linux/ccp-psp.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>
@@ -186,6 +187,9 @@ struct vcpu_svm {
 	struct page *avic_backing_page;
 	u64 *avic_physical_id_cache;
 	bool avic_is_running;
+
+	/* host CPU on which this vcpu last did VMRUN */
+	unsigned int last_cpuid;
 };
 
 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
@@ -243,6 +247,25 @@ static int avic;
 module_param(avic, int, S_IRUGO);
 #endif
 
+/* Secure Encrypted Virtualization */
+static bool sev_enabled;
+static unsigned long max_sev_asid;
+static unsigned long *sev_asid_bitmap;
+
+#define kvm_sev_guest()		(kvm->arch.sev_info.handle)
+#define kvm_sev_handle()	(kvm->arch.sev_info.handle)
+#define kvm_sev_ref()		(kvm->arch.sev_info.ref_count++)
+#define kvm_sev_unref()		(kvm->arch.sev_info.ref_count--)
+#define svm_sev_handle()	(svm->vcpu.kvm->arch.sev_info.handle)
+#define svm_sev_asid()		(svm->vcpu.kvm->arch.sev_info.asid)
+#define svm_sev_ref()		(svm->vcpu.kvm->arch.sev_info.ref_count++)
+#define svm_sev_unref()		(svm->vcpu.kvm->arch.sev_info.ref_count--)
+#define svm_sev_guest()		(svm->vcpu.kvm->arch.sev_info.handle)
+#define svm_sev_ref_count()	(svm->vcpu.kvm->arch.sev_info.ref_count)
+
+static int sev_asid_new(void);
+static void sev_asid_free(int asid);
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -474,6 +497,8 @@ struct svm_cpu_data {
 	struct kvm_ldttss_desc *tss_desc;
 
 	struct page *save_area;
+
+	void **sev_vmcb;  /* index = sev_asid, value = vmcb pointer */
 };
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -727,7 +752,10 @@ static int svm_hardware_enable(void)
 	sd->asid_generation = 1;
 	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
 	sd->next_asid = sd->max_asid + 1;
-	sd->min_asid = 1;
+	sd->min_asid = max_sev_asid + 1;
+
+	if (sev_enabled)
+		memset(sd->sev_vmcb, 0, (max_sev_asid + 1) * sizeof(void *));
 
 	native_store_gdt(&gdt_descr);
 	gdt = (struct desc_struct *)gdt_descr.address;
@@ -788,6 +816,7 @@ static void svm_cpu_uninit(int cpu)
 
 	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
 	__free_page(sd->save_area);
+	kfree(sd->sev_vmcb);
 	kfree(sd);
 }
 
@@ -805,6 +834,14 @@ static int svm_cpu_init(int cpu)
 	if (!sd->save_area)
 		goto err_1;
 
+	if (sev_enabled) {
+		sd->sev_vmcb = kmalloc((max_sev_asid + 1) * sizeof(void *),
+					GFP_KERNEL);
+		r = -ENOMEM;
+		if (!sd->sev_vmcb)
+			goto err_1;
+	}
+
 	per_cpu(svm_data, cpu) = sd;
 
 	return 0;
@@ -931,6 +968,74 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
 	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
+static __init void sev_hardware_setup(void)
+{
+	int ret, psp_ret;
+	struct psp_data_init *init;
+	struct psp_data_status *status;
+
+	/*
+	 * Check SEV feature support: Fn8000_001F[EAX]
+	 *	Bit 1: Secure Encrypted Virtualization (SEV) supported
+	 */
+	if (!(cpuid_eax(0x8000001F) & 0x2))
+		return;
+
+	/*
+	 * Get the maximum number of encrypted guests supported: Fn8000_001F[ECX]
+	 *	Bits 31:0: Number of supported guests
+	 */
+	max_sev_asid = cpuid_ecx(0x8000001F);
+	if (!max_sev_asid)
+		return;
+
+	init = kzalloc(sizeof(*init), GFP_KERNEL);
+	if (!init)
+		return;
+
+	status = kzalloc(sizeof(*status), GFP_KERNEL);
+	if (!status)
+		goto err_1;
+
+	/* Initialize PSP firmware */
+	init->hdr.buffer_len = sizeof(*init);
+	init->flags = 0;
+	ret = psp_platform_init(init, &psp_ret);
+	if (ret) {
+		printk(KERN_ERR "SEV: PSP_INIT ret=%d (%#x)\n", ret, psp_ret);
+		goto err_2;
+	}
+
+	/* Initialize SEV ASID bitmap */
+	sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid + 1),
+				  sizeof(unsigned long), GFP_KERNEL);
+	if (!sev_asid_bitmap) {
+		psp_platform_shutdown(&psp_ret);
+		goto err_2;
+	}
+	bitmap_zero(sev_asid_bitmap, max_sev_asid);
+	set_bit(0, sev_asid_bitmap);  /* mark ASID 0 as used */
+
+	sev_enabled = 1;
+	printk(KERN_INFO "kvm: SEV enabled\n");
+
+	/* Query the platform status and print API version */
+	status->hdr.buffer_len = sizeof(*status);
+	ret = psp_platform_status(status, &psp_ret);
+	if (ret) {
+		printk(KERN_ERR "SEV: PLATFORM_STATUS ret=%d (%#x)\n", ret, psp_ret);
+		goto err_2;
+	}
+
+	printk(KERN_INFO "SEV API: %d.%d\n",
+			status->api_major, status->api_minor);
+err_2:
+	kfree(status);
+err_1:
+	kfree(init);
+	return;
+}
+
 static __init int svm_hardware_setup(void)
 {
 	int cpu;
@@ -966,6 +1071,8 @@ static __init int svm_hardware_setup(void)
 		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
 	}
 
+	sev_hardware_setup();
+
 	for_each_possible_cpu(cpu) {
 		r = svm_cpu_init(cpu);
 		if (r)
@@ -1003,10 +1110,25 @@ err:
 	return r;
 }
 
+static __exit void sev_hardware_unsetup(void)
+{
+	int ret, psp_ret;
+
+	ret = psp_platform_shutdown(&psp_ret);
+	if (ret)
+		printk(KERN_ERR "failed to shutdown PSP rc=%d (%#x)\n",
+		       ret, psp_ret);
+
+	kfree(sev_asid_bitmap);
+}
+
 static __exit void svm_hardware_unsetup(void)
 {
 	int cpu;
 
+	if (sev_enabled)
+		sev_hardware_unsetup();
+
 	for_each_possible_cpu(cpu)
 		svm_cpu_uninit(cpu);
 
@@ -1088,6 +1210,11 @@ static void avic_init_vmcb(struct vcpu_svm *svm)
 	svm->vcpu.arch.apicv_active = true;
 }
 
+static void sev_init_vmcb(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1202,6 +1329,10 @@ static void init_vmcb(struct vcpu_svm *svm)
 	if (avic)
 		avic_init_vmcb(svm);
 
+	if (svm_sev_guest())
+		sev_init_vmcb(svm);
+
+
 	mark_all_dirty(svm->vmcb);
 
 	enable_gif(svm);
@@ -1413,6 +1544,14 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 		avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
 }
 
+static void sev_init_vcpu(struct vcpu_svm *svm)
+{
+	if (!svm_sev_guest())
+		return;
+
+	svm_sev_ref();
+}
+
 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 {
 	struct vcpu_svm *svm;
@@ -1475,6 +1614,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	init_vmcb(svm);
 
 	svm_init_osvw(&svm->vcpu);
+	sev_init_vcpu(svm);
 
 	return &svm->vcpu;
 
@@ -1494,6 +1634,23 @@ out:
 	return ERR_PTR(err);
 }
 
+static void sev_uninit_vcpu(struct vcpu_svm *svm)
+{
+	int cpu;
+	int asid = svm_sev_asid();
+	struct svm_cpu_data *sd;
+
+	if (!svm_sev_guest())
+		return;
+
+	svm_sev_unref();
+
+	for_each_possible_cpu(cpu) {
+		sd = per_cpu(svm_data, cpu);
+		sd->sev_vmcb[asid] = NULL;
+	}
+}
+
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -1502,6 +1659,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
+	sev_uninit_vcpu(svm);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
 }
@@ -1945,6 +2103,11 @@ static int pf_interception(struct vcpu_svm *svm)
 	default:
 		error_code = svm->vmcb->control.exit_info_1;
 
+		/* In SEV mode, the guest physical address will have the C-bit
+		 * set. The C-bit must be cleared before handling the fault.
+		 */
+		if (svm_sev_guest())
+			fault_address &= ~sme_me_mask;
 		trace_kvm_page_fault(fault_address, error_code);
 		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
 			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
@@ -4131,12 +4294,40 @@ static void reload_tss(struct kvm_vcpu *vcpu)
 	load_TR_desc();
 }
 
+static void pre_sev_run(struct vcpu_svm *svm)
+{
+	int asid = svm_sev_asid();
+	int cpu = raw_smp_processor_id();
+	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+
+	/* Assign the asid allocated for this SEV guest */
+	svm->vmcb->control.asid = svm_sev_asid();
+
+	/* Flush guest TLB:
+	 * - when a different VMCB for the same ASID is to be run on the
+	 *   same host CPU, or
+	 * - when this VMCB was executed on a different host CPU in
+	 *   previous VMRUNs.
+	 */
+	if (sd->sev_vmcb[asid] != (void *)svm->vmcb ||
+		svm->last_cpuid != cpu)
+		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
+
+	svm->last_cpuid = cpu;
+	sd->sev_vmcb[asid] = (void *)svm->vmcb;
+
+	mark_dirty(svm->vmcb, VMCB_ASID);
+}
+
 static void pre_svm_run(struct vcpu_svm *svm)
 {
 	int cpu = raw_smp_processor_id();
 
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
+	if (svm_sev_guest())
+		return pre_sev_run(svm);
+
 	/* FIXME: handle wraparound of asid_generation */
 	if (svm->asid_generation != sd->asid_generation)
 		new_asid(svm, sd);
@@ -4985,6 +5176,26 @@ static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
 	avic_handle_ldr_update(vcpu);
 }
 
+static int sev_asid_new(void)
+{
+	int pos;
+
+	if (!sev_enabled)
+		return -ENOTTY;
+
+	pos = find_first_zero_bit(sev_asid_bitmap, max_sev_asid);
+	if (pos >= max_sev_asid)
+		return -EBUSY;
+
+	set_bit(pos, sev_asid_bitmap);
+	return pos;
+}
+
+static void sev_asid_free(int asid)
+{
+	clear_bit(asid, sev_asid_bitmap);
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,

  parent reply	other threads:[~2016-08-23  0:01 UTC|newest]

Thread overview: 255+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-08-22 23:23 [RFC PATCH v1 00/28] x86: Secure Encrypted Virtualization (AMD) Brijesh Singh
2016-08-22 23:23 ` Brijesh Singh
2016-08-22 23:23 ` Brijesh Singh
2016-08-22 23:23 ` Brijesh Singh
2016-08-22 23:23 ` [RFC PATCH v1 01/28] kvm: svm: Add support for additional SVM NPF error codes Brijesh Singh
2016-08-22 23:23   ` Brijesh Singh
2016-08-22 23:23   ` Brijesh Singh
2016-08-22 23:23   ` Brijesh Singh
2016-09-13  9:56   ` Borislav Petkov
2016-09-13  9:56     ` Borislav Petkov
2016-09-13  9:56     ` Borislav Petkov
2016-08-22 23:23 ` Brijesh Singh
2016-08-22 23:23 ` [RFC PATCH v1 02/28] kvm: svm: Add kvm_fast_pio_in support Brijesh Singh
2016-08-22 23:23   ` Brijesh Singh
2016-08-22 23:23   ` Brijesh Singh
2016-08-22 23:23   ` Brijesh Singh
2016-09-21 10:58   ` Borislav Petkov
2016-09-21 10:58     ` Borislav Petkov
2016-09-21 10:58     ` Borislav Petkov
2016-08-22 23:23 ` Brijesh Singh
2016-08-22 23:24 ` [RFC PATCH v1 03/28] kvm: svm: Use the hardware provided GPA instead of page walk Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-09-21 17:16   ` Borislav Petkov
2016-09-21 17:16     ` Borislav Petkov
2016-09-21 17:16     ` Borislav Petkov
2016-08-22 23:24 ` Brijesh Singh
2016-08-22 23:24 ` [RFC PATCH v1 04/28] x86: Secure Encrypted Virtualization (SEV) support Brijesh Singh
2016-08-22 23:24 ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-09-22 15:00   ` Borislav Petkov
2016-09-22 15:00     ` Borislav Petkov
2016-09-22 15:00     ` Borislav Petkov
2016-08-22 23:24 ` [RFC PATCH v1 05/28] KVM: SVM: prepare for new bit definition in nested_ctl Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-09-22 14:17   ` Borislav Petkov
2016-09-22 14:17     ` Borislav Petkov
2016-09-22 14:17     ` Borislav Petkov
2016-08-22 23:24 ` Brijesh Singh
2016-08-22 23:24 ` [RFC PATCH v1 06/28] KVM: SVM: Add SEV feature definitions to KVM Brijesh Singh
2016-08-22 23:24 ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24 ` [RFC PATCH v1 07/28] x86: Do not encrypt memory areas if SEV is enabled Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24   ` Brijesh Singh
2016-08-22 23:24 ` Brijesh Singh
2016-08-22 23:25 ` [RFC PATCH v1 08/28] Access BOOT related data encrypted with SEV active Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25 ` Brijesh Singh
2016-08-22 23:25 ` [RFC PATCH v1 09/28] x86/efi: Access EFI data as encrypted when SEV is active Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-09-22 14:35   ` Borislav Petkov
2016-09-22 14:35     ` Borislav Petkov
2016-09-22 14:35     ` Borislav Petkov
2016-09-22 14:35     ` Borislav Petkov
2016-09-22 14:45     ` Paolo Bonzini
2016-09-22 14:45       ` Paolo Bonzini
2016-09-22 14:45       ` Paolo Bonzini
2016-09-22 14:59       ` Borislav Petkov
2016-09-22 14:59         ` Borislav Petkov
2016-09-22 14:59         ` Borislav Petkov
2016-09-22 14:59         ` Borislav Petkov
2016-09-22 15:05         ` Paolo Bonzini
2016-09-22 15:05           ` Paolo Bonzini
2016-09-22 15:05           ` Paolo Bonzini
2016-09-22 17:07           ` Borislav Petkov
2016-09-22 17:07             ` Borislav Petkov
2016-09-22 17:07             ` Borislav Petkov
2016-09-22 17:07             ` Borislav Petkov
2016-09-22 17:08             ` Paolo Bonzini
2016-09-22 17:08               ` Paolo Bonzini
2016-09-22 17:08               ` Paolo Bonzini
2016-09-22 17:08               ` Paolo Bonzini
2016-09-22 17:27               ` Borislav Petkov
2016-09-22 17:27                 ` Borislav Petkov
2016-09-22 17:27                 ` Borislav Petkov
2016-09-22 19:04             ` Tom Lendacky
2016-09-22 19:04               ` Tom Lendacky
2016-09-22 19:04               ` Tom Lendacky
2016-09-22 19:04               ` Tom Lendacky
2016-09-22 19:11               ` Borislav Petkov
2016-09-22 19:11                 ` Borislav Petkov
2016-09-22 19:11                 ` Borislav Petkov
2016-09-22 19:49                 ` Tom Lendacky
2016-09-22 19:49                   ` Tom Lendacky
2016-09-22 19:49                   ` Tom Lendacky
2016-09-22 19:49                   ` Tom Lendacky
2016-09-22 20:10                   ` Borislav Petkov
2016-09-22 20:10                     ` Borislav Petkov
2016-09-22 20:10                     ` Borislav Petkov
2016-09-22 18:59         ` Tom Lendacky
2016-09-22 18:59           ` Tom Lendacky
2016-09-22 18:59           ` Tom Lendacky
2016-09-22 18:59           ` Tom Lendacky
2016-09-22 18:47       ` Tom Lendacky
2016-09-22 18:47         ` Tom Lendacky
2016-09-22 18:47         ` Tom Lendacky
2016-09-22 18:47         ` Tom Lendacky
2016-09-22 18:50         ` Paolo Bonzini
2016-09-22 18:50           ` Paolo Bonzini
2016-09-22 18:50           ` Paolo Bonzini
2016-09-22 17:46     ` Tom Lendacky
2016-09-22 17:46       ` Tom Lendacky
2016-09-22 17:46       ` Tom Lendacky
2016-09-22 17:46       ` Tom Lendacky
2016-09-22 18:23       ` Paolo Bonzini
2016-09-22 18:23         ` Paolo Bonzini
2016-09-22 18:23         ` Paolo Bonzini
2016-09-22 18:37         ` Borislav Petkov
2016-09-22 18:37           ` Borislav Petkov
2016-09-22 18:37           ` Borislav Petkov
     [not found]           ` <20160922183759.7ahw2kbxit3epnzk-fF5Pk5pvG8Y@public.gmane.org>
2016-09-22 18:44             ` Paolo Bonzini
2016-09-22 18:44               ` Paolo Bonzini
2016-09-23  9:33           ` Kai Huang
2016-09-23  9:33             ` Kai Huang
2016-09-23  9:33             ` Kai Huang
2016-09-23  9:50             ` Borislav Petkov
2016-09-23  9:50               ` Borislav Petkov
2016-09-23  9:50               ` Borislav Petkov
2016-08-22 23:25 ` Brijesh Singh
2016-08-22 23:25 ` [RFC PATCH v1 10/28] x86: Change early_ioremap to early_memremap for BOOT data Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25 ` Brijesh Singh
2016-08-22 23:25 ` [RFC PATCH v1 11/28] x86: Don't decrypt trampoline area if SEV is active Brijesh Singh
2016-08-22 23:25 ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:25   ` Brijesh Singh
2016-08-22 23:26 ` [RFC PATCH v1 12/28] x86: DMA support for SEV memory encryption Brijesh Singh
2016-08-22 23:26 ` [RFC PATCH v1 13/28] iommu/amd: AMD IOMMU support for SEV Brijesh Singh
2016-08-22 23:26 ` [RFC PATCH v1 14/28] x86: Don't set the SME MSR bit when SEV is active Brijesh Singh
2016-08-22 23:26 ` [RFC PATCH v1 15/28] x86: Unroll string I/O " Brijesh Singh
2016-08-22 23:26 ` [RFC PATCH v1 16/28] x86: Add support to determine if running with SEV enabled Brijesh Singh
2016-08-22 23:27 ` [RFC PATCH v1 17/28] KVM: SVM: Enable SEV by setting the SEV_ENABLE cpu feature Brijesh Singh
2016-08-22 23:27 ` [RFC PATCH v1 18/28] crypto: add AMD Platform Security Processor driver Brijesh Singh
2016-08-23  7:14   ` Herbert Xu
2016-08-24 12:02     ` Tom Lendacky
2016-08-22 23:27 ` [RFC PATCH v1 19/28] KVM: SVM: prepare to reserve asid for SEV guest Brijesh Singh
2016-10-13 10:17   ` Paolo Bonzini
2016-08-22 23:28 ` [RFC PATCH v1 20/28] KVM: SVM: prepare for SEV guest management API support Brijesh Singh [this message]
2016-10-13 10:41   ` Paolo Bonzini
2016-08-22 23:28 ` [RFC PATCH v1 21/28] KVM: introduce KVM_SEV_ISSUE_CMD ioctl Brijesh Singh
2016-10-13 10:45   ` Paolo Bonzini
2016-10-17 17:57     ` Brijesh Singh
2016-10-17 20:14       ` Paolo Bonzini
2016-10-18 19:32         ` Brijesh Singh
2016-10-18 21:44           ` Paolo Bonzini
2016-08-22 23:28 ` [RFC PATCH v1 22/28] KVM: SVM: add SEV launch start command Brijesh Singh
2016-10-13 11:12   ` Paolo Bonzini
2016-08-22 23:28 ` [RFC PATCH v1 23/28] KVM: SVM: add SEV launch update command Brijesh Singh
2016-08-22 23:28 ` [RFC PATCH v1 24/28] KVM: SVM: add SEV_LAUNCH_FINISH command Brijesh Singh
2016-10-13 11:16   ` Paolo Bonzini
2016-08-22 23:29 ` [RFC PATCH v1 25/28] KVM: SVM: add KVM_SEV_GUEST_STATUS command Brijesh Singh
2016-08-22 23:29 ` [RFC PATCH v1 26/28] KVM: SVM: add KVM_SEV_DEBUG_DECRYPT command Brijesh Singh
2016-08-22 23:29 ` [RFC PATCH v1 27/28] KVM: SVM: add KVM_SEV_DEBUG_ENCRYPT command Brijesh Singh
2016-08-22 23:29 ` [RFC PATCH v1 28/28] KVM: SVM: add command to query SEV API version Brijesh Singh
2016-10-13 11:19 ` [RFC PATCH v1 00/28] x86: Secure Encrypted Virtualization (AMD) Paolo Bonzini
2016-10-17 13:51   ` Brijesh Singh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the mbox file linked on this page, import it into your mail client,
  and reply-to-all from there (a minimal sketch using mutt follows this list).

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=147190848221.9523.931142742439444357.stgit@brijesh-build-machine \
    --to=brijesh.singh@amd.com \
    --cc=aarcange@redhat.com \
    --cc=akpm@linux-foundation.org \
    --cc=alexandre.bounine@idt.com \
    --cc=andriy.shevchenko@linux.intel.com \
    --cc=bhe@redhat.com \
    --cc=bp@suse.de \
    --cc=dan.j.williams@intel.com \
    --cc=davem@davemloft.net \
    --cc=devel@linuxdriverproject.org \
    --cc=dyoung@redhat.com \
    --cc=herbert@gondor.apana.org.au \
    --cc=hpa@zytor.com \
    --cc=iamjoonsoo.kim@lge.com \
    --cc=joro@8bytes.org \
    --cc=jroedel@suse.de \
    --cc=keescook@chromium.org \
    --cc=kuleshovmail@gmail.com \
    --cc=kvm@vger.kernel.org \
    --cc=labbott@fedoraproject.org \
    --cc=linus.walleij@linaro.org \
    --cc=linux-crypto@vger.kernel.org \
    --cc=linux-efi@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mathieu.desnoyers@efficios.com \
    --cc=matt@codeblueprint.co.uk \
    --cc=mcgrof@kernel.org \
    --cc=mchehab@kernel.org \
    --cc=mingo@redhat.com \
    --cc=msalter@redhat.com \
    --cc=paul.gortmaker@windriver.com \
    --cc=pbonzini@redhat.com \
    --cc=rkrcmar@redhat.com \
    --cc=ross.zwisler@linux.intel.com \
    --cc=sfr@canb.auug.org.au \
    --cc=simon.guinot@sequanux.org \
    --cc=tglx@linutronix.de \
    --cc=thomas.lendacky@amd.com \
    --cc=tony.luck@intel.com \
    --cc=toshi.kani@hpe.com \
    --cc=x86@kernel.org \
    --cc=xemul@parallels.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line before the message body.
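
For the first (mbox) method, here is a minimal sketch; the file name
./thread.mbox and the choice of mutt are assumptions, and any mail client
that can open an mbox file works the same way:

  # open the saved copy of this thread in mutt
  mutt -f ./thread.mbox
  # select this message, then press 'g' (group-reply) to reply to all
  # recipients, quoting interleaved rather than top-posting

Whichever client is used, the reply should keep its In-Reply-To header
pointing at the Message-ID shown in the git send-email example above.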