From mboxrd@z Thu Jan 1 00:00:00 1970
From: =?UTF-8?q?Adalbert=20Laz=C4=83r?=
Subject: [PATCH v9 35/84] KVM: x86: wire in the preread/prewrite/preexec page trackers
Date: Wed, 22 Jul 2020 00:08:33 +0300
Message-ID: <20200721210922.7646-36-alazar@bitdefender.com>
References: <20200721210922.7646-1-alazar@bitdefender.com>
Mime-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Return-path:
In-Reply-To: <20200721210922.7646-1-alazar@bitdefender.com>
Sender: kvm-owner@vger.kernel.org
To: kvm@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org, Paolo Bonzini,
	=?UTF-8?q?Mihai=20Don=C8=9Bu?=, Marian Rotariu, Stefan Sicleru,
	=?UTF-8?q?Adalbert=20Laz=C4=83r?=
List-Id: virtualization@lists.linuxfoundation.org

From: Mihai Donțu

These are needed in order to notify the introspection tool when a
read/write/execute access happens on one of the tracked memory pages.

This patch also handles the case when the introspection tool requests
that the vCPU re-enter the guest (and abort the emulation of the
current instruction).

Signed-off-by: Mihai Donțu
Co-developed-by: Marian Rotariu
Signed-off-by: Marian Rotariu
Co-developed-by: Stefan Sicleru
Signed-off-by: Stefan Sicleru
Signed-off-by: Adalbert Lazăr
---
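[ Reviewer illustration, not part of the patch: a minimal sketch of an
  introspection-side consumer of the trackers wired in below. The
  track_prewrite hook and its signature are assumptions inferred from
  the kvm_page_track_prewrite() call sites in this patch (the hook
  itself is introduced earlier in this series); all demo_* names are
  hypothetical. Pages would be put under tracking with
  kvm_slot_page_track_add_page(), as account_shadowed() does below for
  KVM_PAGE_TRACK_PREWRITE.

  #include <asm/kvm_page_track.h>

  static bool demo_track_prewrite(struct kvm_vcpu *vcpu, gpa_t gpa,
                                  gva_t gva, const u8 *new, int bytes,
                                  struct kvm_page_track_notifier_node *node)
  {
          /*
           * Returning false denies the access: the emulator paths below
           * return X86EMUL_RETRY_INSTR and the vCPU re-enters the guest
           * without completing the instruction.
           */
          return false;
  }

  static struct kvm_page_track_notifier_node demo_node = {
          .track_prewrite = demo_track_prewrite,
  };

  /* kvm_page_track_register_notifier() is the existing x86 API. */
  static void demo_register(struct kvm *kvm)
  {
          kvm_page_track_register_notifier(kvm, &demo_node);
  }
]
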
 arch/x86/kvm/emulate.c     |  4 +++
 arch/x86/kvm/kvm_emulate.h |  1 +
 arch/x86/kvm/mmu/mmu.c     | 58 +++++++++++++++++++++++++++++---------
 arch/x86/kvm/x86.c         | 45 +++++++++++++++++++++++------
 4 files changed, 85 insertions(+), 23 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d0e2825ae617..15a005a3b3f5 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5442,6 +5442,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 			ctxt->memopp->addr.mem.ea + ctxt->_eip);
 
 done:
+	if (rc == X86EMUL_RETRY_INSTR)
+		return EMULATION_RETRY_INSTR;
 	if (rc == X86EMUL_PROPAGATE_FAULT)
 		ctxt->have_exception = true;
 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
@@ -5813,6 +5815,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	if (rc == X86EMUL_INTERCEPTED)
 		return EMULATION_INTERCEPTED;
 
+	if (rc == X86EMUL_RETRY_INSTR)
+		return EMULATION_RETRY_INSTR;
 	if (rc == X86EMUL_CONTINUE)
 		writeback_registers(ctxt);
 
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 43c93ffa76ed..5bfab8d65cd1 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -496,6 +496,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
 #define EMULATION_INTERCEPTED 2
+#define EMULATION_RETRY_INSTR 3
 void init_decode_cache(struct x86_emulate_ctxt *ctxt);
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ede8ef6d1e34..da57321e0cec 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1226,9 +1230,13 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	slot = __gfn_to_memslot(slots, gfn);
 
 	/* the non-leaf shadow pages are keeping readonly. */
-	if (sp->role.level > PG_LEVEL_4K)
-		return kvm_slot_page_track_add_page(kvm, slot, gfn,
-						    KVM_PAGE_TRACK_WRITE);
+	if (sp->role.level > PG_LEVEL_4K) {
+		kvm_slot_page_track_add_page(kvm, slot, gfn,
+					     KVM_PAGE_TRACK_PREWRITE);
+		kvm_slot_page_track_add_page(kvm, slot, gfn,
+					     KVM_PAGE_TRACK_WRITE);
+		return;
+	}
 
 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 }
@@ -1254,9 +1258,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	gfn = sp->gfn;
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	if (sp->role.level > PG_LEVEL_4K)
-		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
-						       KVM_PAGE_TRACK_WRITE);
+	if (sp->role.level > PG_LEVEL_4K) {
+		kvm_slot_page_track_remove_page(kvm, slot, gfn,
+						KVM_PAGE_TRACK_PREWRITE);
+		kvm_slot_page_track_remove_page(kvm, slot, gfn,
+						KVM_PAGE_TRACK_WRITE);
+		return;
+	}
 
 	kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
@@ -2987,7 +2995,8 @@ static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 {
 	struct kvm_mmu_page *sp;
 
-	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREWRITE) ||
+	    kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
 		return true;
 
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
@@ -3405,6 +3414,21 @@ static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
 	}
 }
 
+static unsigned int kvm_mmu_apply_introspection_access(struct kvm_vcpu *vcpu,
+						       gfn_t gfn,
+						       unsigned int acc)
+{
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREREAD))
+		acc &= ~ACC_USER_MASK;
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREWRITE) ||
+	    kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+		acc &= ~ACC_WRITE_MASK;
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREEXEC))
+		acc &= ~ACC_EXEC_MASK;
+
+	return acc;
+}
+
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 			int map_writable, int max_level, kvm_pfn_t pfn,
 			bool prefault, bool account_disallowed_nx_lpage)
@@ -3414,6 +3438,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 	int level, ret;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	gfn_t base_gfn = gfn;
+	unsigned int acc;
 
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		return RET_PF_RETRY;
@@ -3443,7 +3468,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 		}
 	}
 
-	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
+	acc = kvm_mmu_apply_introspection_access(vcpu, base_gfn, ACC_ALL);
+
+	ret = mmu_set_spte(vcpu, it.sptep, acc,
 			   write, level, base_gfn, pfn, prefault,
 			   map_writable);
 	direct_pte_prefetch(vcpu, it.sptep);
@@ -4098,15 +4125,18 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 	if (unlikely(error_code & PFERR_RSVD_MASK))
 		return false;
 
-	if (!(error_code & PFERR_PRESENT_MASK) ||
-	      !(error_code & PFERR_WRITE_MASK))
-		return false;
-
 	/*
-	 * guest is writing the page which is write tracked which can
+	 * The guest is reading/writing/fetching a page that is being
+	 * read/write/execute tracked, and this can
 	 * not be fixed by page fault handler.
 	 */
-	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+	if (((error_code & PFERR_USER_MASK)
+	     && kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREREAD))
+	    || ((error_code & PFERR_WRITE_MASK)
+		&& (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREWRITE)
+		    || kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)))
+	    || ((error_code & PFERR_FETCH_MASK)
+		&& kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREEXEC)))
 		return true;
 
 	return false;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 83424339ea9d..7668ca5b8a7a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5519,6 +5519,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
+		if (!kvm_page_track_preread(vcpu, gpa, addr, toread))
+			return X86EMUL_RETRY_INSTR;
 		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
 					       offset, toread);
 		if (ret < 0) {
@@ -5550,6 +5552,9 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 	if (unlikely(gpa == UNMAPPED_GVA))
 		return X86EMUL_PROPAGATE_FAULT;
 
+	if (!kvm_page_track_preexec(vcpu, gpa, addr))
+		return X86EMUL_RETRY_INSTR;
+
 	offset = addr & (PAGE_SIZE-1);
 	if (WARN_ON(offset + bytes > PAGE_SIZE))
 		bytes = (unsigned)PAGE_SIZE - offset;
@@ -5618,11 +5623,14 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
+		if (!kvm_page_track_prewrite(vcpu, gpa, addr, data, towrite))
+			return X86EMUL_RETRY_INSTR;
 		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
 			goto out;
 		}
+		kvm_page_track_write(vcpu, gpa, addr, data, towrite);
 
 		bytes -= towrite;
 		data += towrite;
@@ -5723,13 +5731,22 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
 			const void *val, int bytes)
 {
-	int ret;
-
-	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
-	if (ret < 0)
-		return 0;
+	if (!kvm_page_track_prewrite(vcpu, gpa, gva, val, bytes))
+		return X86EMUL_RETRY_INSTR;
+	if (kvm_vcpu_write_guest(vcpu, gpa, val, bytes) < 0)
+		return X86EMUL_UNHANDLEABLE;
 	kvm_page_track_write(vcpu, gpa, gva, val, bytes);
-	return 1;
+	return X86EMUL_CONTINUE;
+}
+
+static int emulator_read_phys(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+			      void *val, int bytes)
+{
+	if (!kvm_page_track_preread(vcpu, gpa, gva, bytes))
+		return X86EMUL_RETRY_INSTR;
+	if (kvm_vcpu_read_guest(vcpu, gpa, val, bytes) < 0)
+		return X86EMUL_UNHANDLEABLE;
+	return X86EMUL_CONTINUE;
 }
 
 struct read_write_emulator_ops {
@@ -5759,7 +5776,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
 			void *val, int bytes)
 {
-	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
+	return emulator_read_phys(vcpu, gpa, gva, val, bytes);
 }
 
 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
@@ -5833,8 +5850,11 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
 			return X86EMUL_PROPAGATE_FAULT;
 	}
 
-	if (!ret && ops->read_write_emulate(vcpu, gpa, addr, val, bytes))
-		return X86EMUL_CONTINUE;
+	if (!ret) {
+		ret = ops->read_write_emulate(vcpu, gpa, addr, val, bytes);
+		if (ret == X86EMUL_CONTINUE || ret == X86EMUL_RETRY_INSTR)
+			return ret;
+	}
 
 	/*
 	 * Is this MMIO handled locally?
@@ -5978,6 +5998,9 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 		goto emul_write;
 
+	if (!kvm_page_track_prewrite(vcpu, gpa, addr, new, bytes))
+		return X86EMUL_RETRY_INSTR;
+
 	kaddr = map.hva + offset_in_page(gpa);
 
 	switch (bytes) {
@@ -6904,6 +6927,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 		trace_kvm_emulate_insn_start(vcpu);
 		++vcpu->stat.insn_emulation;
+		if (r == EMULATION_RETRY_INSTR)
+			return 1;
 		if (r != EMULATION_OK) {
 			if ((emulation_type & EMULTYPE_TRAP_UD) ||
 			    (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
@@ -6973,6 +6998,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	r = x86_emulate_insn(ctxt);
 
+	if (r == EMULATION_RETRY_INSTR)
+		return 1;
 	if (r == EMULATION_INTERCEPTED)
 		return 1;
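
[ Reviewer illustration, not part of the patch: how the new retry result
  threads through the layers changed above. A denied pre-access check
  surfaces to KVM's run loop as the ordinary "re-enter the guest" result;
  the demo_* helper below is hypothetical and only restates the hunks
  above.

  /*
   * kvm_page_track_pre{read,write,exec}() returns false
   *   -> X86EMUL_RETRY_INSTR    (x86.c memory access callbacks)
   *   -> EMULATION_RETRY_INSTR  (x86_decode_insn()/x86_emulate_insn())
   *   -> return 1               (x86_emulate_instruction())
   */
  static int demo_handle_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
  {
          int r = x86_emulate_instruction(vcpu, cr2_or_gpa, 0, NULL, 0);

          /*
           * r == 1 now also covers EMULATION_RETRY_INSTR: the vCPU
           * resumes and the instruction is retried once the
           * introspection tool has handled the pre-access event
           * (no exception injected, no exit to userspace).
           */
          return r;
  }
]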