From: Joerg Roedel <joro@8bytes.org>
To: x86@kernel.org
Cc: hpa@zytor.com, Andy Lutomirski <luto@kernel.org>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Thomas Hellstrom <thellstrom@vmware.com>,
	Jiri Slaby <jslaby@suse.cz>,
	Dan Williams <dan.j.williams@intel.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	Juergen Gross <jgross@suse.com>,
	Kees Cook <keescook@chromium.org>,
	linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
	virtualization@lists.linux-foundation.org,
	Joerg Roedel <joro@8bytes.org>, Joerg Roedel <jroedel@suse.de>
Subject: [PATCH 22/62] x86/sev-es: Add handler for MMIO events
Date: Tue, 11 Feb 2020 14:52:16 +0100	[thread overview]
Message-ID: <20200211135256.24617-23-joro@8bytes.org> (raw)
In-Reply-To: <20200211135256.24617-1-joro@8bytes.org>

From: Tom Lendacky <thomas.lendacky@amd.com>

Add a handler for #VC exceptions caused by MMIO intercepts. These
intercepts arrive as nested page faults on pages with reserved bits set
in their page-table entries.
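
For orientation, a single emulated access flows through the pieces added
below roughly like this (an illustrative sketch, not code from this
patch):

	Guest:       mov %eax,(%rbx)     ; %rbx points into an MMIO region
	Hardware:    nested page fault (reserved bits) -> #VC exception
	#VC handler: decodes the instruction, copies %eax into
	             ghcb->shared_buffer, fills in the GHCB exit fields
	             and issues VMGEXIT
	Hypervisor:  emulates the access; for reads, the result is placed
	             in the shared buffer
	#VC handler: for reads, copies the result to the target register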

TODO:
	- Add return values of helper functions
	- Check permissions on page-table walks
	- Check data segments

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
[ jroedel@suse.de: Adapt to VC handling framework ]
Co-developed-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 arch/x86/boot/compressed/sev-es.c |   8 +
 arch/x86/include/uapi/asm/svm.h   |   5 +
 arch/x86/kernel/sev-es-shared.c   | 236 ++++++++++++++++++++++++++++++
 3 files changed, 249 insertions(+)

diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c
index 270a23c05f53..55a78b42a2f2 100644
--- a/arch/x86/boot/compressed/sev-es.c
+++ b/arch/x86/boot/compressed/sev-es.c
@@ -67,6 +67,11 @@ static enum es_result es_read_mem(struct es_em_ctxt *ctxt,
 	return ES_OK;
 }
 
+static phys_addr_t es_slow_virt_to_phys(struct ghcb *ghcb, long vaddr)
+{
+	return (phys_addr_t)vaddr;
+}
+
 #undef __init
 #undef __pa
 #define __init
@@ -121,6 +126,9 @@ void boot_vc_handler(struct pt_regs *regs)
 	case SVM_EXIT_CPUID:
 		result = handle_cpuid(boot_ghcb, &ctxt);
 		break;
+	case SVM_EXIT_NPF:
+		result = handle_mmio(boot_ghcb, &ctxt);
+		break;
 	default:
 		result = ES_UNSUPPORTED;
 		break;
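
Note: the es_slow_virt_to_phys() stub above can return the virtual
address unchanged only because the decompression code runs on an
identity-mapped page table set up earlier in this series; the runtime
kernel needs a real page-table walk here (covered by the TODO list
above).
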
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index c68d1618c9b0..8f36ae021a7f 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -81,6 +81,11 @@
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI		0x401
 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS	0x402
 
+/* SEV-ES software-defined VMGEXIT events */
+#define SVM_VMGEXIT_MMIO_READ			0x80000001
+#define SVM_VMGEXIT_MMIO_WRITE			0x80000002
+#define SVM_VMGEXIT_UNSUPPORTED_EVENT		0x8000ffff
+
 #define SVM_EXIT_ERR           -1
 
 #define SVM_EXIT_REASONS \
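
The new constants live in a software-defined range well above the
hardware exit codes in this header, so the hypervisor can tell a
synthesized VMGEXIT event apart from a hardware intercept with a simple
range check, e.g. (hypothetical helper, not part of this patch):

	static inline bool svm_exit_is_sw_event(u64 exit_code)
	{
		return exit_code >= 0x80000000ULL;
	}
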
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
index 0f422e3b2077..14693eff9614 100644
--- a/arch/x86/kernel/sev-es-shared.c
+++ b/arch/x86/kernel/sev-es-shared.c
@@ -483,3 +483,239 @@ static enum es_result handle_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 
 	return ES_OK;
 }
+
+/* Map from x86 register index to pt_regs offset */
+static unsigned long *register_from_idx(struct pt_regs *regs, u8 reg)
+{
+	static int reg2pt_regs[] = {
+		10, 11, 12, 5, 19, 4, 13, 14, 9, 8, 7, 6, 3, 2, 1, 0
+	};
+	unsigned long *regs_array = (unsigned long *)regs;
+
+	if (WARN_ONCE(reg > 15, "register index is not valid: %#hhx\n", reg))
+		return NULL;
+
+	return &regs_array[reg2pt_regs[reg]];
+}
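+
+/*
+ * Illustration (editor's example, not part of the patch): register
+ * encoding 0 is %rax, which lives in slot 10 of the x86-64 struct
+ * pt_regs, while encoding 15 is %r15, its first member. A build-time
+ * sanity check could read:
+ *
+ *	BUILD_BUG_ON(offsetof(struct pt_regs, ax) != 10 * sizeof(long));
+ *	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
+ */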
+
+static u64 insn_get_eff_addr(struct es_em_ctxt *ctxt)
+{
+	u64 effective_addr;
+	u8 mod, rm;
+
+	if (!ctxt->insn.modrm.nbytes)
+		return 0;
+
+	if (insn_rip_relative(&ctxt->insn))
+		return ctxt->regs->ip + ctxt->insn.length +
+		       ctxt->insn.displacement.value;
+
+	mod = X86_MODRM_MOD(ctxt->insn.modrm.value);
+	rm = X86_MODRM_RM(ctxt->insn.modrm.value);
+
+	if (ctxt->insn.rex_prefix.nbytes &&
+	    X86_REX_B(ctxt->insn.rex_prefix.value))
+		rm |= 0x8;
+
+	if (mod == 3)
+		return *register_from_idx(ctxt->regs, rm);
+
+	switch (mod) {
+	case 1:
+	case 2:
+		effective_addr = ctxt->insn.displacement.value;
+		break;
+	default:
+		effective_addr = 0;
+	}
+
+	if (ctxt->insn.sib.nbytes) {
+		u8 scale, index, base;
+
+		scale = X86_SIB_SCALE(ctxt->insn.sib.value);
+		index = X86_SIB_INDEX(ctxt->insn.sib.value);
+		base = X86_SIB_BASE(ctxt->insn.sib.value);
+		if (ctxt->insn.rex_prefix.nbytes &&
+		    X86_REX_X(ctxt->insn.rex_prefix.value))
+			index |= 0x8;
+		if (ctxt->insn.rex_prefix.nbytes &&
+		    X86_REX_B(ctxt->insn.rex_prefix.value))
+			base |= 0x8;
+
+		if (index != 4)
+			effective_addr += (*register_from_idx(ctxt->regs, index)
+					   << scale);
+
+		if ((X86_SIB_BASE(ctxt->insn.sib.value) != 5) || mod)
+			effective_addr += *register_from_idx(ctxt->regs, base);
+		else
+			effective_addr += ctxt->insn.displacement.value;
+	} else {
+		effective_addr += *register_from_idx(ctxt->regs, rm);
+	}
+
+	return effective_addr;
+}
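+
+/*
+ * Worked example (editor's illustration):
+ *
+ *	mov %eax, 0x10(%rbx,%rcx,4)	; encoded as 89 44 8b 10
+ *
+ * modrm is 0x44 (mod=1: disp8, reg=0: %eax, rm=4: SIB follows), sib is
+ * 0x8b (scale=2, index=1: %rcx, base=3: %rbx), so the function above
+ * returns 0x10 + (%rcx << 2) + %rbx.
+ */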
+
+static unsigned long *insn_get_reg(struct es_em_ctxt *ctxt)
+{
+	u8 reg;
+
+	if (!ctxt->insn.modrm.nbytes)
+		return NULL;
+
+	reg = X86_MODRM_REG(ctxt->insn.modrm.value);
+	if (ctxt->insn.rex_prefix.nbytes &&
+	    X86_REX_R(ctxt->insn.rex_prefix.value))
+		reg |= 0x8;
+
+	return register_from_idx(ctxt->regs, reg);
+}
+
+static enum es_result do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+			      unsigned int bytes, bool read)
+{
+	u64 exit_code, exit_info_1, exit_info_2;
+	unsigned long ghcb_pa = __pa(ghcb);
+
+	/* Register-direct addressing mode not supported with MMIO */
+	if (X86_MODRM_MOD(ctxt->insn.modrm.value) == 3)
+		return ES_UNSUPPORTED;
+
+	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
+
+	exit_info_1 = insn_get_eff_addr(ctxt);
+	exit_info_1 = es_slow_virt_to_phys(ghcb, exit_info_1);
+	exit_info_2 = bytes;    /* Can never be greater than 8 */
+
+	ghcb->save.sw_scratch = ghcb_pa + offsetof(struct ghcb, shared_buffer);
+
+	return ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
+}
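+
+/*
+ * Summary of the GHCB fields used above (editor's note; field names per
+ * the GHCB definitions added earlier in this series):
+ *
+ *	sw_exit_code   SVM_VMGEXIT_MMIO_READ or SVM_VMGEXIT_MMIO_WRITE
+ *	sw_exit_info_1 physical address of the MMIO access
+ *	sw_exit_info_2 size of the access in bytes (at most 8)
+ *	sw_scratch     physical address of ghcb->shared_buffer, which
+ *		       carries the data in both directions
+ */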
+
+static enum es_result handle_mmio_twobyte_ops(struct ghcb *ghcb,
+					      struct es_em_ctxt *ctxt)
+{
+	struct insn *insn = &ctxt->insn;
+	unsigned int bytes = 0;
+	enum es_result ret;
+	int sign_byte;
+	unsigned long *reg_data;
+
+	switch (insn->opcode.bytes[1]) {
+		/* MMIO Read w/ zero-extension */
+	case 0xb6:
+		bytes = 1;
+		/* Fallthrough */
+	case 0xb7:
+		if (!bytes)
+			bytes = 2;
+
+		ret = do_mmio(ghcb, ctxt, bytes, true);
+		if (ret)
+			break;
+
+		/* Zero extend based on operand size */
+		reg_data = insn_get_reg(ctxt);
+		memset(reg_data, 0, insn->opnd_bytes);
+
+		memcpy(reg_data, ghcb->shared_buffer, bytes);
+		break;
+
+		/* MMIO Read w/ sign-extension */
+	case 0xbe:
+		bytes = 1;
+		/* Fallthrough */
+	case 0xbf:
+		if (!bytes)
+			bytes = 2;
+
+		ret = do_mmio(ghcb, ctxt, bytes, true);
+		if (ret)
+			break;
+
+		/* Sign extend based on operand size */
+		reg_data = insn_get_reg(ctxt);
+		if (bytes == 1) {
+			u8 *val = (u8 *)ghcb->shared_buffer;
+
+			sign_byte = (*val & 0x80) ? 0xff : 0x00;
+		} else {
+			u16 *val = (u16 *)ghcb->shared_buffer;
+
+			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
+		}
+		memset(reg_data, sign_byte, insn->opnd_bytes);
+
+		memcpy(reg_data, ghcb->shared_buffer, bytes);
+		break;
+
+	default:
+		ret = ES_UNSUPPORTED;
+	}
+
+	return ret;
+}
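+
+/*
+ * Example of the sign-extending path (editor's illustration): a movswq
+ * (opcode 0f bf with REX.W, so insn->opnd_bytes == 8) reading the value
+ * 0x8000 from MMIO proceeds as:
+ *
+ *	sign_byte = 0xff		(bit 15 of the value is set)
+ *	after memset(): reg = 0xffffffffffffffff
+ *	after memcpy(): reg = 0xffffffffffff8000
+ */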
+
+static enum es_result handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+	struct insn *insn = &ctxt->insn;
+	unsigned int bytes = 0;
+	enum es_result ret;
+	unsigned long *reg_data;
+
+	switch (insn->opcode.bytes[0]) {
+	/* MMIO Write */
+	case 0x88:
+		bytes = 1;
+		/* Fallthrough */
+	case 0x89:
+		if (!bytes)
+			bytes = insn->opnd_bytes;
+
+		reg_data = insn_get_reg(ctxt);
+		memcpy(ghcb->shared_buffer, reg_data, bytes);
+
+		ret = do_mmio(ghcb, ctxt, bytes, false);
+		break;
+
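+		/* MMIO Write w/ immediate */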
+	case 0xc6:
+		bytes = 1;
+		/* Fallthrough */
+	case 0xc7:
+		if (!bytes)
+			bytes = insn->opnd_bytes;
+
+		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
+
+		ret = do_mmio(ghcb, ctxt, bytes, false);
+		break;
+
+		/* MMIO Read */
+	case 0x8a:
+		bytes = 1;
+		/* Fallthrough */
+	case 0x8b:
+		if (!bytes)
+			bytes = insn->opnd_bytes;
+
+		ret = do_mmio(ghcb, ctxt, bytes, true);
+		if (ret)
+			break;
+
+		reg_data = insn_get_reg(ctxt);
+		if (bytes == 4)
+			*reg_data = 0;  /* Zero-extend for 32-bit operation */
+
+		memcpy(reg_data, ghcb->shared_buffer, bytes);
+		break;
+
+		/* Two-Byte Opcodes */
+	case 0x0f:
+		ret = handle_mmio_twobyte_ops(ghcb, ctxt);
+		break;
+	default:
+		ret = ES_UNSUPPORTED;
+	}
+
+	return ret;
+}
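+
+/*
+ * For reference (editor's summary) - the MOV forms handled above; any
+ * other opcode fails with ES_UNSUPPORTED:
+ *
+ *	0x88 / 0x89	MOV reg->mem	MMIO write (1 byte / operand size)
+ *	0xc6 / 0xc7	MOV imm->mem	MMIO write of an immediate
+ *	0x8a / 0x8b	MOV mem->reg	MMIO read (1 byte / operand size)
+ *	0x0f b6/b7	MOVZX		MMIO read, zero-extended (1/2 bytes)
+ *	0x0f be/bf	MOVSX		MMIO read, sign-extended (1/2 bytes)
+ */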
-- 
2.17.1

