From mboxrd@z Thu Jan 1 00:00:00 1970 From: Razvan Cojocaru Subject: [PATCH V4 2/5] xen: Optimize introspection access to guest state Date: Fri, 5 Sep 2014 13:01:34 +0300 Message-ID: <1409911297-3360-3-git-send-email-rcojocaru@bitdefender.com> References: <1409911297-3360-1-git-send-email-rcojocaru@bitdefender.com> Mime-Version: 1.0 Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Return-path: Received: from mail6.bemta4.messagelabs.com ([85.158.143.247]) by lists.xen.org with esmtp (Exim 4.72) (envelope-from ) id 1XPqKz-0001KT-4r for xen-devel@lists.xenproject.org; Fri, 05 Sep 2014 10:01:49 +0000 Received: from smtp01.buh.bitdefender.com (smtp.bitdefender.biz [10.17.80.75]) by mx-sr.buh.bitdefender.com (Postfix) with ESMTP id 749F78002B for ; Fri, 5 Sep 2014 13:01:43 +0300 (EEST) In-Reply-To: <1409911297-3360-1-git-send-email-rcojocaru@bitdefender.com> List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Sender: xen-devel-bounces@lists.xen.org Errors-To: xen-devel-bounces@lists.xen.org To: xen-devel@lists.xenproject.org Cc: kevin.tian@intel.com, keir@xen.org, ian.campbell@citrix.com, Razvan Cojocaru , stefano.stabellini@eu.citrix.com, eddie.dong@intel.com, ian.jackson@eu.citrix.com, tim@xen.org, jbeulich@suse.com, jun.nakajima@intel.com, andrew.cooper3@citrix.com List-Id: xen-devel@lists.xenproject.org Speed optimization for introspection purposes: a handful of registers are sent along with each mem_event. This requires enlargement of the mem_event_request / mem_event_response structures, and additional code to fill in relevant values. Since the EPT event processing code needs more data than CR3 or MSR event processors, hvm_mem_event_fill_regs() fills in less data than p2m_mem_event_fill_regs(), in order to avoid overhead. Struct hvm_hw_cpu has been considered instead of the custom struct mem_event_regs_st, but its size would cause quick filling up of the mem_event ring buffer. 
Signed-off-by: Razvan Cojocaru Acked-by: Jan Beulich --- xen/arch/x86/hvm/hvm.c | 33 +++++++++++++++++++++++ xen/arch/x86/mm/p2m.c | 57 ++++++++++++++++++++++++++++++++++++++++ xen/include/public/mem_event.h | 39 +++++++++++++++++++++++++++ 3 files changed, 129 insertions(+) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 8d905d3..bb45593 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -6149,6 +6149,38 @@ int hvm_debug_op(struct vcpu *v, int32_t op) return rc; } +static void hvm_mem_event_fill_regs(mem_event_request_t *req) +{ + const struct cpu_user_regs *regs = guest_cpu_user_regs(); + const struct vcpu *curr = current; + + req->x86_regs.rax = regs->eax; + req->x86_regs.rcx = regs->ecx; + req->x86_regs.rdx = regs->edx; + req->x86_regs.rbx = regs->ebx; + req->x86_regs.rsp = regs->esp; + req->x86_regs.rbp = regs->ebp; + req->x86_regs.rsi = regs->esi; + req->x86_regs.rdi = regs->edi; + + req->x86_regs.r8 = regs->r8; + req->x86_regs.r9 = regs->r9; + req->x86_regs.r10 = regs->r10; + req->x86_regs.r11 = regs->r11; + req->x86_regs.r12 = regs->r12; + req->x86_regs.r13 = regs->r13; + req->x86_regs.r14 = regs->r14; + req->x86_regs.r15 = regs->r15; + + req->x86_regs.rflags = regs->eflags; + req->x86_regs.rip = regs->eip; + + req->x86_regs.msr_efer = curr->arch.hvm_vcpu.guest_efer; + req->x86_regs.cr0 = curr->arch.hvm_vcpu.guest_cr[0]; + req->x86_regs.cr3 = curr->arch.hvm_vcpu.guest_cr[3]; + req->x86_regs.cr4 = curr->arch.hvm_vcpu.guest_cr[4]; +} + static int hvm_memory_event_traps(long p, uint32_t reason, unsigned long value, unsigned long old, bool_t gla_valid, unsigned long gla) @@ -6193,6 +6225,7 @@ static int hvm_memory_event_traps(long p, uint32_t reason, req.gla = old; } + hvm_mem_event_fill_regs(&req); mem_event_put_request(d, &d->mem_event->access, &req); return 1; diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index 32776c3..d0962aa 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -1327,6 +1327,61 
@@ void p2m_mem_paging_resume(struct domain *d) } } +static void p2m_mem_event_fill_regs(mem_event_request_t *req) +{ + const struct cpu_user_regs *regs = guest_cpu_user_regs(); + struct segment_register seg; + struct hvm_hw_cpu ctxt; + struct vcpu *curr = current; + + /* Architecture-specific vmcs/vmcb bits */ + hvm_funcs.save_cpu_ctxt(curr, &ctxt); + + req->x86_regs.rax = regs->eax; + req->x86_regs.rcx = regs->ecx; + req->x86_regs.rdx = regs->edx; + req->x86_regs.rbx = regs->ebx; + req->x86_regs.rsp = regs->esp; + req->x86_regs.rbp = regs->ebp; + req->x86_regs.rsi = regs->esi; + req->x86_regs.rdi = regs->edi; + + req->x86_regs.r8 = regs->r8; + req->x86_regs.r9 = regs->r9; + req->x86_regs.r10 = regs->r10; + req->x86_regs.r11 = regs->r11; + req->x86_regs.r12 = regs->r12; + req->x86_regs.r13 = regs->r13; + req->x86_regs.r14 = regs->r14; + req->x86_regs.r15 = regs->r15; + + req->x86_regs.rflags = regs->eflags; + req->x86_regs.rip = regs->eip; + + req->x86_regs.dr7 = curr->arch.debugreg[7]; + req->x86_regs.cr0 = ctxt.cr0; + req->x86_regs.cr2 = ctxt.cr2; + req->x86_regs.cr3 = ctxt.cr3; + req->x86_regs.cr4 = ctxt.cr4; + + req->x86_regs.sysenter_cs = ctxt.sysenter_cs; + req->x86_regs.sysenter_esp = ctxt.sysenter_esp; + req->x86_regs.sysenter_eip = ctxt.sysenter_eip; + + req->x86_regs.msr_efer = ctxt.msr_efer; + req->x86_regs.msr_star = ctxt.msr_star; + req->x86_regs.msr_lstar = ctxt.msr_lstar; + + hvm_get_segment_register(curr, x86_seg_fs, &seg); + req->x86_regs.fs_base = seg.base; + + hvm_get_segment_register(curr, x86_seg_gs, &seg); + req->x86_regs.gs_base = seg.base; + + hvm_get_segment_register(curr, x86_seg_cs, &seg); + req->x86_regs.cs_arbytes = seg.attr.bytes; +} + bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla, struct npfec npfec, mem_event_request_t **req_ptr) @@ -1417,6 +1472,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla, req->access_w = npfec.write_access; req->access_x = npfec.insn_fetch; req->vcpu_id = v->vcpu_id; + + 
p2m_mem_event_fill_regs(req); } /* Pause the current VCPU */ diff --git a/xen/include/public/mem_event.h b/xen/include/public/mem_event.h index fc12697..d3dd9c6 100644 --- a/xen/include/public/mem_event.h +++ b/xen/include/public/mem_event.h @@ -48,6 +48,44 @@ #define MEM_EVENT_REASON_MSR 7 /* MSR was hit: gfn is MSR value, gla is MSR address; does NOT honour HVMPME_onchangeonly */ +/* Using a custom struct (not hvm_hw_cpu) so as to not fill + * the mem_event ring buffer too quickly. */ +struct mem_event_regs_x86 { + uint64_t rax; + uint64_t rcx; + uint64_t rdx; + uint64_t rbx; + uint64_t rsp; + uint64_t rbp; + uint64_t rsi; + uint64_t rdi; + uint64_t r8; + uint64_t r9; + uint64_t r10; + uint64_t r11; + uint64_t r12; + uint64_t r13; + uint64_t r14; + uint64_t r15; + uint64_t rflags; + uint64_t dr7; + uint64_t rip; + uint64_t cr0; + uint64_t cr2; + uint64_t cr3; + uint64_t cr4; + uint64_t sysenter_cs; + uint64_t sysenter_esp; + uint64_t sysenter_eip; + uint64_t msr_efer; + uint64_t msr_star; + uint64_t msr_lstar; + uint64_t fs_base; + uint64_t gs_base; + uint32_t cs_arbytes; + uint32_t _pad; +}; + typedef struct mem_event_st { uint32_t flags; uint32_t vcpu_id; @@ -67,6 +105,7 @@ typedef struct mem_event_st { uint16_t available:10; uint16_t reason; + struct mem_event_regs_x86 x86_regs; } mem_event_request_t, mem_event_response_t; DEFINE_RING_TYPES(mem_event, mem_event_request_t, mem_event_response_t); -- 1.7.9.5