From: Joerg Roedel
To: x86@kernel.org
Cc: hpa@zytor.com, Andy Lutomirski, Dave Hansen, Peter Zijlstra,
	Thomas Hellstrom, Jiri Slaby, Dan Williams, Tom Lendacky,
	Juergen Gross, Kees Cook, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org, virtualization@lists.linux-foundation.org,
	Joerg Roedel, Joerg Roedel
Subject: [PATCH 47/70] x86/sev-es: Handle MMIO events
Date: Thu, 19 Mar 2020 10:13:44 +0100
Message-Id: <20200319091407.1481-48-joro@8bytes.org>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200319091407.1481-1-joro@8bytes.org>
References: <20200319091407.1481-1-joro@8bytes.org>

From: Tom Lendacky

Add handler for VC exceptions caused by MMIO intercepts. These
intercepts come along as nested page faults on pages with reserved
bits set.

TODO:
	- Add return values of helper functions
	- Check permissions on page-table walks
	- Check data segments

Signed-off-by: Tom Lendacky
[ jroedel@suse.de: Adapt to VC handling framework ]
Co-developed-by: Joerg Roedel
Signed-off-by: Joerg Roedel
---
 arch/x86/boot/compressed/sev-es.c |   5 +
 arch/x86/include/uapi/asm/svm.h   |   5 +
 arch/x86/kernel/sev-es.c          | 188 ++++++++++++++++++++++++++++++
 3 files changed, 198 insertions(+)

diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev-es.c
index 40eaf24db641..53c65fc09341 100644
--- a/arch/x86/boot/compressed/sev-es.c
+++ b/arch/x86/boot/compressed/sev-es.c
@@ -100,6 +100,11 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 	return ES_OK;
 }
 
+static phys_addr_t vc_slow_virt_to_phys(struct ghcb *ghcb, long vaddr)
+{
+	return (phys_addr_t)vaddr;
+}
+
 #undef __init
 #undef __pa
 #define __init
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index c68d1618c9b0..8f36ae021a7f 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -81,6 +81,11 @@
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI	0x401
 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS	0x402
 
+/* SEV-ES software-defined VMGEXIT events */
+#define SVM_VMGEXIT_MMIO_READ		0x80000001
+#define SVM_VMGEXIT_MMIO_WRITE		0x80000002
+#define SVM_VMGEXIT_UNSUPPORTED_EVENT	0x8000ffff
+
 #define SVM_EXIT_ERR           -1
 
 #define SVM_EXIT_REASONS \
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index 6d45a2499460..6b0f82f81229 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev-es.c
@@ -240,6 +240,25 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 	return ES_EXCEPTION;
 }
 
+static phys_addr_t vc_slow_virt_to_phys(struct ghcb *ghcb, long vaddr)
+{
+	unsigned long va = (unsigned long)vaddr;
+	unsigned int level;
+	phys_addr_t pa;
+	pgd_t *pgd;
+	pte_t *pte;
+
+	pgd = pgd_offset(current->active_mm, va);
+	pte = lookup_address_in_pgd(pgd, va, &level);
+	if (!pte)
+		return 0;
+
+	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
+	pa |= va & ~page_level_mask(level);
+
+	return pa;
+}
+
 /* Include code shared with pre-decompression boot stage */
 #include "sev-es-shared.c"
 
@@ -295,6 +314,172 @@ static void __init vc_early_vc_forward_exception(struct es_em_ctxt *ctxt)
 	early_exception(ctxt->regs, trapnr);
 }
 
+static long *vc_insn_get_reg(struct es_em_ctxt *ctxt)
+{
+	long *reg_array;
+	int offset;
+
+	reg_array = (long *)ctxt->regs;
+	offset = insn_get_modrm_reg_off(&ctxt->insn, ctxt->regs);
+
+	if (offset < 0)
+		return NULL;
+
+	offset /= sizeof(long);
+
+	return reg_array + offset;
+}
+
+static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+				 unsigned int bytes, bool read)
+{
+	u64 exit_code, exit_info_1, exit_info_2;
+	unsigned long ghcb_pa = __pa(ghcb);
+	void __user *ref;
+
+	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
+	if (ref == (void __user *)-1L)
+		return ES_UNSUPPORTED;
+
+	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
+
+	exit_info_1 = vc_slow_virt_to_phys(ghcb, (long)ref);
+	exit_info_2 = bytes;    /* Can never be greater than 8 */
+
+	ghcb->save.sw_scratch = ghcb_pa + offsetof(struct ghcb, shared_buffer);
+
+	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
+}
+
+static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb,
+						 struct es_em_ctxt *ctxt)
+{
+	struct insn *insn = &ctxt->insn;
+	unsigned int bytes = 0;
+	enum es_result ret;
+	int sign_byte;
+	long *reg_data;
+
+	switch (insn->opcode.bytes[1]) {
+	/* MMIO Read w/ zero-extension */
+	case 0xb6:
+		bytes = 1;
+		/* Fallthrough */
+	case 0xb7:
+		if (!bytes)
+			bytes = 2;
+
+		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+		if (ret)
+			break;
+
+		/* Zero extend based on operand size */
+		reg_data = vc_insn_get_reg(ctxt);
+		memset(reg_data, 0, insn->opnd_bytes);
+
+		memcpy(reg_data, ghcb->shared_buffer, bytes);
+		break;
+
+	/* MMIO Read w/ sign-extension */
+	case 0xbe:
+		bytes = 1;
+		/* Fallthrough */
+	case 0xbf:
+		if (!bytes)
+			bytes = 2;
+
+		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+		if (ret)
+			break;
+
+		/* Sign extend based on operand size */
+		reg_data = vc_insn_get_reg(ctxt);
+		if (bytes == 1) {
+			u8 *val = (u8 *)ghcb->shared_buffer;
+
+			sign_byte = (*val & 0x80) ? 0xff : 0x00;
+		} else {
+			u16 *val = (u16 *)ghcb->shared_buffer;
+
+			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
+		}
+		memset(reg_data, sign_byte, insn->opnd_bytes);
+
+		memcpy(reg_data, ghcb->shared_buffer, bytes);
+		break;
+
+	default:
+		ret = ES_UNSUPPORTED;
+	}
+
+	return ret;
+}
+
+static enum es_result vc_handle_mmio(struct ghcb *ghcb,
+				     struct es_em_ctxt *ctxt)
+{
+	struct insn *insn = &ctxt->insn;
+	unsigned int bytes = 0;
+	enum es_result ret;
+	long *reg_data;
+
+	switch (insn->opcode.bytes[0]) {
+	/* MMIO Write */
+	case 0x88:
+		bytes = 1;
+		/* Fallthrough */
+	case 0x89:
+		if (!bytes)
+			bytes = insn->opnd_bytes;
+
+		reg_data = vc_insn_get_reg(ctxt);
+		memcpy(ghcb->shared_buffer, reg_data, bytes);
+
+		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
+		break;
+
+	case 0xc6:
+		bytes = 1;
+		/* Fallthrough */
+	case 0xc7:
+		if (!bytes)
+			bytes = insn->opnd_bytes;
+
+		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
+
+		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
+		break;
+
+	/* MMIO Read */
+	case 0x8a:
+		bytes = 1;
+		/* Fallthrough */
+	case 0x8b:
+		if (!bytes)
+			bytes = insn->opnd_bytes;
+
+		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
+		if (ret)
+			break;
+
+		reg_data = vc_insn_get_reg(ctxt);
+		if (bytes == 4)
+			*reg_data = 0;	/* Zero-extend for 32-bit operation */
+
+		memcpy(reg_data, ghcb->shared_buffer, bytes);
+		break;
+
+	/* Two-Byte Opcodes */
+	case 0x0f:
+		ret = vc_handle_mmio_twobyte_ops(ghcb, ctxt);
+		break;
+	default:
+		ret = ES_UNSUPPORTED;
+	}
+
+	return ret;
+}
+
 static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
 					 struct ghcb *ghcb,
 					 unsigned long exit_code)
@@ -308,6 +493,9 @@ static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
 	case SVM_EXIT_IOIO:
 		result = vc_handle_ioio(ghcb, ctxt);
 		break;
+	case SVM_EXIT_NPF:
+		result = vc_handle_mmio(ghcb, ctxt);
+		break;
 	default:
 		/*
 		 * Unexpected #VC exception
-- 
2.17.1