linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Alexander Graf <graf@amazon.com>
To: Anup Patel <anup@brainfault.org>
Cc: Anup Patel <Anup.Patel@wdc.com>,
	Palmer Dabbelt <palmer@sifive.com>,
	"Paul Walmsley" <paul.walmsley@sifive.com>,
	Paolo Bonzini <pbonzini@redhat.com>, Radim K <rkrcmar@redhat.com>,
	Daniel Lezcano <daniel.lezcano@linaro.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Atish Patra <Atish.Patra@wdc.com>,
	Alistair Francis <Alistair.Francis@wdc.com>,
	Damien Le Moal <Damien.LeMoal@wdc.com>,
	Christoph Hellwig <hch@infradead.org>,
	"kvm@vger.kernel.org" <kvm@vger.kernel.org>,
	"linux-riscv@lists.infradead.org"
	<linux-riscv@lists.infradead.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH v5 10/20] RISC-V: KVM: Handle MMIO exits for VCPU
Date: Thu, 22 Aug 2019 15:25:41 +0200	[thread overview]
Message-ID: <4fe83f28-3a55-e74c-0d40-1cd556015fea@amazon.com> (raw)
In-Reply-To: <CAAhSdy2QtZRKvs0Hr-mZuVsb7sVkweeW-RpvhObZR009UbA7KA@mail.gmail.com>



On 22.08.19 14:33, Anup Patel wrote:
> On Thu, Aug 22, 2019 at 5:44 PM Alexander Graf <graf@amazon.com> wrote:
>>
>> On 22.08.19 10:44, Anup Patel wrote:
>>> We will get stage2 page faults whenever the Guest/VM accesses a SW emulated
>>> MMIO device or unmapped Guest RAM.
>>>
>>> This patch implements MMIO read/write emulation by extracting MMIO
>>> details from the trapped load/store instruction and forwarding the
>>> MMIO read/write to user-space. The actual MMIO emulation will happen
>>> in user-space and KVM kernel module will only take care of register
>>> updates before resuming the trapped VCPU.
>>>
>>> The handling for stage2 page faults for unmapped Guest RAM will be
>>> implemented by a separate patch later.
>>>
>>> Signed-off-by: Anup Patel <anup.patel@wdc.com>
>>> Acked-by: Paolo Bonzini <pbonzini@redhat.com>
>>> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
>>> ---
>>>    arch/riscv/include/asm/kvm_host.h |  11 +
>>>    arch/riscv/kvm/mmu.c              |   7 +
>>>    arch/riscv/kvm/vcpu_exit.c        | 436 +++++++++++++++++++++++++++++-
>>>    3 files changed, 451 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
>>> index 18f1097f1d8d..4388bace6d70 100644
>>> --- a/arch/riscv/include/asm/kvm_host.h
>>> +++ b/arch/riscv/include/asm/kvm_host.h
>>> @@ -53,6 +53,12 @@ struct kvm_arch {
>>>        phys_addr_t pgd_phys;
>>>    };
>>>
>>> +struct kvm_mmio_decode {
>>> +     unsigned long insn;
>>> +     int len;
>>> +     int shift;
>>> +};
>>> +
>>>    struct kvm_cpu_context {
>>>        unsigned long zero;
>>>        unsigned long ra;
>>> @@ -141,6 +147,9 @@ struct kvm_vcpu_arch {
>>>        unsigned long irqs_pending;
>>>        unsigned long irqs_pending_mask;
>>>
>>> +     /* MMIO instruction details */
>>> +     struct kvm_mmio_decode mmio_decode;
>>> +
>>>        /* VCPU power-off state */
>>>        bool power_off;
>>>
>>> @@ -160,6 +169,8 @@ static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
>>>    int kvm_riscv_setup_vsip(void);
>>>    void kvm_riscv_cleanup_vsip(void);
>>>
>>> +int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long hva,
>>> +                      bool is_write);
>>>    void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
>>>    int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
>>>    void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
>>> diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
>>> index 04dd089b86ff..2b965f9aac07 100644
>>> --- a/arch/riscv/kvm/mmu.c
>>> +++ b/arch/riscv/kvm/mmu.c
>>> @@ -61,6 +61,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
>>>        return 0;
>>>    }
>>>
>>> +int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long hva,
>>> +                      bool is_write)
>>> +{
>>> +     /* TODO: */
>>> +     return 0;
>>> +}
>>> +
>>>    void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu)
>>>    {
>>>        /* TODO: */
>>> diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
>>> index e4d7c8f0807a..efc06198c259 100644
>>> --- a/arch/riscv/kvm/vcpu_exit.c
>>> +++ b/arch/riscv/kvm/vcpu_exit.c
>>> @@ -6,9 +6,371 @@
>>>     *     Anup Patel <anup.patel@wdc.com>
>>>     */
>>>
>>> +#include <linux/bitops.h>
>>>    #include <linux/errno.h>
>>>    #include <linux/err.h>
>>>    #include <linux/kvm_host.h>
>>> +#include <asm/csr.h>
>>> +
>>> +#define INSN_MATCH_LB                0x3
>>> +#define INSN_MASK_LB         0x707f
>>> +#define INSN_MATCH_LH                0x1003
>>> +#define INSN_MASK_LH         0x707f
>>> +#define INSN_MATCH_LW                0x2003
>>> +#define INSN_MASK_LW         0x707f
>>> +#define INSN_MATCH_LD                0x3003
>>> +#define INSN_MASK_LD         0x707f
>>> +#define INSN_MATCH_LBU               0x4003
>>> +#define INSN_MASK_LBU                0x707f
>>> +#define INSN_MATCH_LHU               0x5003
>>> +#define INSN_MASK_LHU                0x707f
>>> +#define INSN_MATCH_LWU               0x6003
>>> +#define INSN_MASK_LWU                0x707f
>>> +#define INSN_MATCH_SB                0x23
>>> +#define INSN_MASK_SB         0x707f
>>> +#define INSN_MATCH_SH                0x1023
>>> +#define INSN_MASK_SH         0x707f
>>> +#define INSN_MATCH_SW                0x2023
>>> +#define INSN_MASK_SW         0x707f
>>> +#define INSN_MATCH_SD                0x3023
>>> +#define INSN_MASK_SD         0x707f
>>> +
>>> +#define INSN_MATCH_C_LD              0x6000
>>> +#define INSN_MASK_C_LD               0xe003
>>> +#define INSN_MATCH_C_SD              0xe000
>>> +#define INSN_MASK_C_SD               0xe003
>>> +#define INSN_MATCH_C_LW              0x4000
>>> +#define INSN_MASK_C_LW               0xe003
>>> +#define INSN_MATCH_C_SW              0xc000
>>> +#define INSN_MASK_C_SW               0xe003
>>> +#define INSN_MATCH_C_LDSP    0x6002
>>> +#define INSN_MASK_C_LDSP     0xe003
>>> +#define INSN_MATCH_C_SDSP    0xe002
>>> +#define INSN_MASK_C_SDSP     0xe003
>>> +#define INSN_MATCH_C_LWSP    0x4002
>>> +#define INSN_MASK_C_LWSP     0xe003
>>> +#define INSN_MATCH_C_SWSP    0xc002
>>> +#define INSN_MASK_C_SWSP     0xe003
>>> +
>>> +#define INSN_LEN(insn)               ((((insn) & 0x3) < 0x3) ? 2 : 4)
>>> +
>>> +#ifdef CONFIG_64BIT
>>> +#define LOG_REGBYTES         3
>>> +#else
>>> +#define LOG_REGBYTES         2
>>> +#endif
>>> +#define REGBYTES             (1 << LOG_REGBYTES)
>>> +
>>> +#define SH_RD                        7
>>> +#define SH_RS1                       15
>>> +#define SH_RS2                       20
>>> +#define SH_RS2C                      2
>>> +
>>> +#define RV_X(x, s, n)                (((x) >> (s)) & ((1 << (n)) - 1))
>>> +#define RVC_LW_IMM(x)                ((RV_X(x, 6, 1) << 2) | \
>>> +                              (RV_X(x, 10, 3) << 3) | \
>>> +                              (RV_X(x, 5, 1) << 6))
>>> +#define RVC_LD_IMM(x)                ((RV_X(x, 10, 3) << 3) | \
>>> +                              (RV_X(x, 5, 2) << 6))
>>> +#define RVC_LWSP_IMM(x)              ((RV_X(x, 4, 3) << 2) | \
>>> +                              (RV_X(x, 12, 1) << 5) | \
>>> +                              (RV_X(x, 2, 2) << 6))
>>> +#define RVC_LDSP_IMM(x)              ((RV_X(x, 5, 2) << 3) | \
>>> +                              (RV_X(x, 12, 1) << 5) | \
>>> +                              (RV_X(x, 2, 3) << 6))
>>> +#define RVC_SWSP_IMM(x)              ((RV_X(x, 9, 4) << 2) | \
>>> +                              (RV_X(x, 7, 2) << 6))
>>> +#define RVC_SDSP_IMM(x)              ((RV_X(x, 10, 3) << 3) | \
>>> +                              (RV_X(x, 7, 3) << 6))
>>> +#define RVC_RS1S(insn)               (8 + RV_X(insn, SH_RD, 3))
>>> +#define RVC_RS2S(insn)               (8 + RV_X(insn, SH_RS2C, 3))
>>> +#define RVC_RS2(insn)                RV_X(insn, SH_RS2C, 5)
>>> +
>>> +#define SHIFT_RIGHT(x, y)            \
>>> +     ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
>>> +
>>> +#define REG_MASK                     \
>>> +     ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
>>> +
>>> +#define REG_OFFSET(insn, pos)                \
>>> +     (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
>>> +
>>> +#define REG_PTR(insn, pos, regs)     \
>>> +     (ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))
>>> +
>>> +#define GET_RM(insn)         (((insn) >> 12) & 7)
>>> +
>>> +#define GET_RS1(insn, regs)  (*REG_PTR(insn, SH_RS1, regs))
>>> +#define GET_RS2(insn, regs)  (*REG_PTR(insn, SH_RS2, regs))
>>> +#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
>>> +#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
>>> +#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
>>> +#define GET_SP(regs)         (*REG_PTR(2, 0, regs))
>>> +#define SET_RD(insn, regs, val)      (*REG_PTR(insn, SH_RD, regs) = (val))
>>> +#define IMM_I(insn)          ((s32)(insn) >> 20)
>>> +#define IMM_S(insn)          (((s32)(insn) >> 25 << 5) | \
>>> +                              (s32)(((insn) >> 7) & 0x1f))
>>> +#define MASK_FUNCT3          0x7000
>>> +
>>> +#define STR(x)                       XSTR(x)
>>> +#define XSTR(x)                      #x
>>> +
>>> +/* TODO: Handle traps due to unpriv load and redirect it back to VS-mode */
>>> +static ulong get_insn(struct kvm_vcpu *vcpu)
>>> +{
>>> +     ulong __sepc = vcpu->arch.guest_context.sepc;
>>> +     ulong __hstatus, __sstatus, __vsstatus;
>>> +#ifdef CONFIG_RISCV_ISA_C
>>> +     ulong rvc_mask = 3, tmp;
>>> +#endif
>>> +     ulong flags, val;
>>> +
>>> +     local_irq_save(flags);
>>> +
>>> +     __vsstatus = csr_read(CSR_VSSTATUS);
>>> +     __sstatus = csr_read(CSR_SSTATUS);
>>> +     __hstatus = csr_read(CSR_HSTATUS);
>>> +
>>> +     csr_write(CSR_VSSTATUS, __vsstatus | SR_MXR);
>>> +     csr_write(CSR_SSTATUS, vcpu->arch.guest_context.sstatus | SR_MXR);
>>> +     csr_write(CSR_HSTATUS, vcpu->arch.guest_context.hstatus | HSTATUS_SPRV);
>>
>> What happens when the insn load triggers a page fault, maybe because the
>> guest was malicious and did
>>
>>     1) Run on page 0x1000
>>     2) Remove map for 0x1000, do *not* flush TLB
>>     3) Trigger MMIO
>>
>> That would DOS the host here, as the host kernel would continue running
>> in guest address space, right?
> 
> Yes, we can certainly fault while accessing Guest instruction. We will
> be fixing this issue in a followup series. We have mentioned this in cover
> letter as well.

I don't think the cover letter is the right place for such a comment. 
Please definitely put it into the code as well, pointing out that this 
is a known bug. Or even better yet: Fix it up properly :).

In fact, with a bug that dramatic, I'm not even sure we can safely 
include the code. We're consciously allowing user space to DOS the kernel.

> 
> BTW, RISC-V spec is going to further improve to provide easy
> access of faulting instruction to Hypervisor.
> (Refer, https://github.com/riscv/riscv-isa-manual/issues/431)

Yes, we have similar extensions on other archs. Is this going to be an 
optional addition or a mandatory bit of the hypervisor spec? If it's not 
mandatory, we can not rely on it, so the current path has to be safe.


Alex

  reply	other threads:[~2019-08-22 13:25 UTC|newest]

Thread overview: 61+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-08-22  8:42 [PATCH v5 00/20] KVM RISC-V Support Anup Patel
2019-08-22  8:42 ` [PATCH v5 01/20] KVM: RISC-V: Add KVM_REG_RISCV for ONE_REG interface Anup Patel
2019-08-22  8:42 ` [PATCH v5 02/20] RISC-V: Add bitmap representing ISA features common across CPUs Anup Patel
2019-08-22  8:43 ` [PATCH v5 03/20] RISC-V: Export few kernel symbols Anup Patel
2019-08-22  8:43 ` [PATCH v5 04/20] RISC-V: Add hypervisor extension related CSR defines Anup Patel
2019-08-22  8:43 ` [PATCH v5 05/20] RISC-V: Add initial skeletal KVM support Anup Patel
2019-08-22  8:43 ` [PATCH v5 06/20] RISC-V: KVM: Implement VCPU create, init and destroy functions Anup Patel
2019-08-22  8:44 ` [PATCH v5 07/20] RISC-V: KVM: Implement VCPU interrupts and requests handling Anup Patel
2019-08-22  8:44 ` [PATCH v5 08/20] RISC-V: KVM: Implement KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls Anup Patel
2019-08-22 12:01   ` Alexander Graf
2019-08-22 14:00     ` Anup Patel
2019-08-22 14:12       ` Alexander Graf
2019-08-23 11:20         ` Anup Patel
2019-08-23 11:42           ` Graf (AWS), Alexander
2019-08-22 14:05     ` Anup Patel
2019-08-22  8:44 ` [PATCH v5 09/20] RISC-V: KVM: Implement VCPU world-switch Anup Patel
2019-08-22  8:44 ` [PATCH v5 10/20] RISC-V: KVM: Handle MMIO exits for VCPU Anup Patel
2019-08-22 12:10   ` Alexander Graf
2019-08-22 12:21     ` Andrew Jones
2019-08-22 12:27     ` Anup Patel
2019-08-22 12:14   ` Alexander Graf
2019-08-22 12:33     ` Anup Patel
2019-08-22 13:25       ` Alexander Graf [this message]
2019-08-22 13:55         ` Anup Patel
2019-08-22  8:45 ` [PATCH v5 11/20] RISC-V: KVM: Handle WFI " Anup Patel
2019-08-22 12:19   ` Alexander Graf
2019-08-22 12:50     ` Anup Patel
2019-08-22  8:45 ` [PATCH v5 12/20] RISC-V: KVM: Implement VMID allocator Anup Patel
2019-08-22  8:45 ` [PATCH v5 13/20] RISC-V: KVM: Implement stage2 page table programming Anup Patel
2019-08-22 12:28   ` Alexander Graf
2019-08-22 12:38     ` Anup Patel
2019-08-22 13:27       ` Alexander Graf
2019-08-22 13:58         ` Anup Patel
2019-08-22 14:09           ` Alexander Graf
2019-08-23 11:21             ` Anup Patel
2019-08-22  8:45 ` [PATCH v5 14/20] RISC-V: KVM: Implement MMU notifiers Anup Patel
2019-08-22  8:46 ` [PATCH v5 15/20] RISC-V: KVM: Add timer functionality Anup Patel
2019-08-23  7:52   ` Alexander Graf
2019-08-23 11:04     ` Anup Patel
2019-08-23 11:33       ` Graf (AWS), Alexander
2019-08-23 11:46         ` Anup Patel
2019-08-23 11:49           ` Alexander Graf
2019-08-23 12:11             ` Anup Patel
2019-08-23 12:25               ` Alexander Graf
2019-08-22  8:46 ` [PATCH v5 16/20] RISC-V: KVM: FP lazy save/restore Anup Patel
2019-08-22  8:46 ` [PATCH v5 17/20] RISC-V: KVM: Implement ONE REG interface for FP registers Anup Patel
2019-08-22  8:46 ` [PATCH v5 18/20] RISC-V: KVM: Add SBI v0.1 support Anup Patel
2019-08-23  8:04   ` Alexander Graf
2019-08-23 11:17     ` Anup Patel
2019-08-23 11:38       ` Graf (AWS), Alexander
2019-08-23 12:00         ` Anup Patel
2019-08-23 12:19           ` Alexander Graf
2019-08-23 12:28             ` Anup Patel
2019-08-22  8:47 ` [PATCH v5 19/20] RISC-V: Enable VIRTIO drivers in RV64 and RV32 defconfig Anup Patel
2019-08-22  8:47 ` [PATCH v5 20/20] RISC-V: KVM: Add MAINTAINERS entry Anup Patel
2019-08-23  8:08 ` [PATCH v5 00/20] KVM RISC-V Support Alexander Graf
2019-08-23 11:25   ` Anup Patel
2019-08-23 11:44     ` Graf (AWS), Alexander
2019-08-23 12:10       ` Paolo Bonzini
2019-08-23 12:19         ` Anup Patel
2019-08-23 12:28           ` Alexander Graf

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=4fe83f28-3a55-e74c-0d40-1cd556015fea@amazon.com \
    --to=graf@amazon.com \
    --cc=Alistair.Francis@wdc.com \
    --cc=Anup.Patel@wdc.com \
    --cc=Atish.Patra@wdc.com \
    --cc=Damien.LeMoal@wdc.com \
    --cc=anup@brainfault.org \
    --cc=daniel.lezcano@linaro.org \
    --cc=hch@infradead.org \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=palmer@sifive.com \
    --cc=paul.walmsley@sifive.com \
    --cc=pbonzini@redhat.com \
    --cc=rkrcmar@redhat.com \
    --cc=tglx@linutronix.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).