* [PATCH RFC v3 1/2] x86/emulate: Move hvmemul_linear_to_phys
@ 2019-02-06 12:53 Alexandru Stefan ISAILA
From: Alexandru Stefan ISAILA @ 2019-02-06 12:53 UTC
  To: xen-devel
  Cc: tamas, wei.liu2, rcojocaru, george.dunlap, andrew.cooper3,
	paul.durrant, jbeulich, Alexandru Stefan ISAILA, roger.pau

The function is moved up so that hvmemul_linear_to_phys() can be called
from hvmemul_map_linear_addr(); the next patch in the series relies on this.

There is no functional change.
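
For illustration only (a standalone analogue, not part of this diff): a
static C function must be visible before its point of use, so moving the
definition above the new caller avoids adding a forward declaration:

    #include <stdio.h>

    /* The definition now precedes its caller, so no forward
     * declaration of linear_to_phys() is needed. */
    static int linear_to_phys(int addr) { return addr + 1; }

    static int map_linear_addr(int addr)
    {
        return linear_to_phys(addr); /* OK: already visible above */
    }

    int main(void)
    {
        printf("%d\n", map_linear_addr(41)); /* prints 42 */
        return 0;
    }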

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
---
 xen/arch/x86/hvm/emulate.c | 181 ++++++++++++++++++-------------------
 1 file changed, 90 insertions(+), 91 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2d02ef1521..a766eecc8e 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -530,6 +530,95 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
     return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
 }
 
+/*
+ * Convert addr from linear to physical form, valid over the range
+ * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
+ * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
+ * @pfec indicates the access checks to be performed during page-table walks.
+ */
+static int hvmemul_linear_to_phys(
+    unsigned long addr,
+    paddr_t *paddr,
+    unsigned int bytes_per_rep,
+    unsigned long *reps,
+    uint32_t pfec,
+    struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    struct vcpu *curr = current;
+    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
+    int reverse;
+
+    /*
+     * Clip repetitions to a sensible maximum. This avoids extensive looping in
+     * this function while still amortising the cost of I/O trap-and-emulate.
+     */
+    *reps = min_t(unsigned long, *reps, 4096);
+
+    /* With no paging it's easy: linear == physical. */
+    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
+    {
+        *paddr = addr;
+        return X86EMUL_OKAY;
+    }
+
+    /* Reverse mode if this is a backwards multi-iteration string operation. */
+    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
+
+    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
+    {
+        /* Do page-straddling first iteration forwards via recursion. */
+        paddr_t _paddr;
+        unsigned long one_rep = 1;
+        int rc = hvmemul_linear_to_phys(
+            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
+        if ( rc != X86EMUL_OKAY )
+            return rc;
+        pfn = _paddr >> PAGE_SHIFT;
+    }
+    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
+    {
+        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
+            return X86EMUL_RETRY;
+        *reps = 0;
+        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+    }
+
+    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
+    todo = *reps * bytes_per_rep;
+    for ( i = 1; done < todo; i++ )
+    {
+        /* Get the next PFN in the range. */
+        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
+        npfn = paging_gva_to_gfn(curr, addr, &pfec);
+
+        /* Is it contiguous with the preceding PFNs? If not then we're done. */
+        if ( (npfn == gfn_x(INVALID_GFN)) ||
+             (npfn != (pfn + (reverse ? -i : i))) )
+        {
+            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
+                return X86EMUL_RETRY;
+            done /= bytes_per_rep;
+            if ( done == 0 )
+            {
+                ASSERT(!reverse);
+                if ( npfn != gfn_x(INVALID_GFN) )
+                    return X86EMUL_UNHANDLEABLE;
+                *reps = 0;
+                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
+                return X86EMUL_EXCEPTION;
+            }
+            *reps = done;
+            break;
+        }
+
+        done += PAGE_SIZE;
+    }
+
+    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
+    return X86EMUL_OKAY;
+}
+
 /*
  * Map the frame(s) covering an individual linear access, for writeable
  * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
@@ -692,97 +781,7 @@ static void hvmemul_unmap_linear_addr(
         *mfn++ = _mfn(0);
     }
 #endif
-}
-
-/*
- * Convert addr from linear to physical form, valid over the range
- * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
- * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
- * @pfec indicates the access checks to be performed during page-table walks.
- */
-static int hvmemul_linear_to_phys(
-    unsigned long addr,
-    paddr_t *paddr,
-    unsigned int bytes_per_rep,
-    unsigned long *reps,
-    uint32_t pfec,
-    struct hvm_emulate_ctxt *hvmemul_ctxt)
-{
-    struct vcpu *curr = current;
-    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
-    int reverse;
-
-    /*
-     * Clip repetitions to a sensible maximum. This avoids extensive looping in
-     * this function while still amortising the cost of I/O trap-and-emulate.
-     */
-    *reps = min_t(unsigned long, *reps, 4096);
-
-    /* With no paging it's easy: linear == physical. */
-    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
-    {
-        *paddr = addr;
-        return X86EMUL_OKAY;
-    }
-
-    /* Reverse mode if this is a backwards multi-iteration string operation. */
-    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
-
-    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
-    {
-        /* Do page-straddling first iteration forwards via recursion. */
-        paddr_t _paddr;
-        unsigned long one_rep = 1;
-        int rc = hvmemul_linear_to_phys(
-            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
-        if ( rc != X86EMUL_OKAY )
-            return rc;
-        pfn = _paddr >> PAGE_SHIFT;
-    }
-    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
-    {
-        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
-            return X86EMUL_RETRY;
-        *reps = 0;
-        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    }
-
-    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
-    todo = *reps * bytes_per_rep;
-    for ( i = 1; done < todo; i++ )
-    {
-        /* Get the next PFN in the range. */
-        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
-        npfn = paging_gva_to_gfn(curr, addr, &pfec);
-
-        /* Is it contiguous with the preceding PFNs? If not then we're done. */
-        if ( (npfn == gfn_x(INVALID_GFN)) ||
-             (npfn != (pfn + (reverse ? -i : i))) )
-        {
-            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
-                return X86EMUL_RETRY;
-            done /= bytes_per_rep;
-            if ( done == 0 )
-            {
-                ASSERT(!reverse);
-                if ( npfn != gfn_x(INVALID_GFN) )
-                    return X86EMUL_UNHANDLEABLE;
-                *reps = 0;
-                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
-                return X86EMUL_EXCEPTION;
-            }
-            *reps = done;
-            break;
-        }
-
-        done += PAGE_SIZE;
-    }
-
-    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
-    return X86EMUL_OKAY;
-}
-    
+}  
 
 static int hvmemul_virtual_to_linear(
     enum x86_segment seg,
-- 
2.17.1

* [PATCH RFC v3 2/2] x86/emulate: Send vm_event from emulate
From: Alexandru Stefan ISAILA @ 2019-02-06 12:53 UTC
  To: xen-devel
  Cc: tamas, wei.liu2, rcojocaru, george.dunlap, andrew.cooper3,
	paul.durrant, jbeulich, Alexandru Stefan ISAILA, roger.pau

This patch aims to have mem access vm events sent from the emulator.
This is useful in the case of page-walks that have to emulate
instructions in access denied pages.

We use hvmemul_map_linear_addr() to intercept r/w access and
hvmemul_insn_fetch() to intercept exec access.

First we try to send a vm event and if the event is sent then emulation
returns X86EMUL_ACCESS_EXCEPTION. If the event is not sent then the
emulation goes on as expected.
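
As a minimal standalone sketch of that contract (mocked helpers; only the
numeric value of X86EMUL_ACCESS_EXCEPTION below comes from this patch):

    #include <stdbool.h>

    enum { X86EMUL_OKAY = 0, X86EMUL_ACCESS_EXCEPTION = 8 };

    /* Pretend a mem_access vm_event was sent to the monitor. */
    static bool send_vm_event_mock(void) { return true; }

    static int emulate_write_mock(void)
    {
        if ( send_vm_event_mock() )
            return X86EMUL_ACCESS_EXCEPTION; /* event sent: stop emulating */
        return X86EMUL_OKAY;                 /* no event: emulate as usual */
    }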

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V2:
	- Join "if" statements where possible
	- Correct indentation where needed
	- Remove rc initialization.
---
 xen/arch/x86/hvm/emulate.c             | 106 ++++++++++++++++++++++++-
 xen/arch/x86/hvm/vm_event.c            |   2 +-
 xen/arch/x86/mm/mem_access.c           |   3 +-
 xen/arch/x86/x86_emulate/x86_emulate.h |   1 +
 xen/include/asm-x86/hvm/emulate.h      |   4 +-
 5 files changed, 109 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index a766eecc8e..e4acbdf271 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -15,6 +15,7 @@
 #include <xen/paging.h>
 #include <xen/trace.h>
 #include <xen/vm_event.h>
+#include <xen/monitor.h>
 #include <asm/event.h>
 #include <asm/i387.h>
 #include <asm/xstate.h>
@@ -26,6 +27,7 @@
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/vm_event.h>
+#include <asm/altp2m.h>
 
 static void hvmtrace_io_assist(const ioreq_t *p)
 {
@@ -530,6 +532,55 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
     return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
 }
 
+static bool hvmemul_send_vm_event(paddr_t gpa, unsigned long gla, gfn_t gfn,
+                                  uint32_t pfec, struct hvm_emulate_ctxt *ctxt)
+{
+    xenmem_access_t access;
+    vm_event_request_t req = {};
+
+    if ( !ctxt->send_event || !pfec ||
+        p2m_get_mem_access(current->domain, gfn, &access,
+                           altp2m_vcpu_idx(current)) != 0 )
+        return false;
+
+    switch ( access )
+    {
+    case XENMEM_access_x:
+    case XENMEM_access_rx:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags = MEM_ACCESS_R | MEM_ACCESS_W;
+        break;
+
+    case XENMEM_access_w:
+    case XENMEM_access_rw:
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags = MEM_ACCESS_X;
+        break;
+
+    case XENMEM_access_r:
+    case XENMEM_access_n:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags |= MEM_ACCESS_X;
+        break;
+
+    default:
+        return false;
+    }
+
+    if ( !req.u.mem_access.flags )
+        return false; /* No violation. */
+
+    req.reason = VM_EVENT_REASON_MEM_ACCESS;
+    req.u.mem_access.gfn = gfn_x(gfn);
+    req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA | MEM_ACCESS_GLA_VALID;
+    req.u.mem_access.gla = gla;
+    req.u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+
+    return monitor_traps(current, true, &req) >= 0;
+}
+
 /*
  * Convert addr from linear to physical form, valid over the range
  * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
@@ -636,6 +687,7 @@ static void *hvmemul_map_linear_addr(
     unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
         (linear >> PAGE_SHIFT) + 1;
     unsigned int i;
+    gfn_t gfn;
 
     /*
      * mfn points to the next free slot.  All used slots have a page reference
@@ -674,7 +726,7 @@ static void *hvmemul_map_linear_addr(
         ASSERT(mfn_x(*mfn) == 0);
 
         res = hvm_translate_get_page(curr, addr, true, pfec,
-                                     &pfinfo, &page, NULL, &p2mt);
+                                     &pfinfo, &page, &gfn, &p2mt);
 
         switch ( res )
         {
@@ -704,6 +756,23 @@ static void *hvmemul_map_linear_addr(
 
         if ( pfec & PFEC_write_access )
         {
+            unsigned long reps = 1;
+            struct hvm_emulate_ctxt old;
+            int rc = 0;
+            paddr_t gpa;
+
+            old = *hvmemul_ctxt;
+            rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
+                                        pfec, hvmemul_ctxt);
+            if ( rc == X86EMUL_EXCEPTION )
+                *hvmemul_ctxt = old;
+
+            if ( hvmemul_send_vm_event(gpa, addr, gfn, pfec, hvmemul_ctxt) )
+            {
+                err = ERR_PTR(~X86EMUL_ACCESS_EXCEPTION);
+                goto out;
+            }
+
             if ( p2m_is_discard_write(p2mt) )
             {
                 err = ERR_PTR(~X86EMUL_OKAY);
@@ -1224,7 +1293,35 @@ int hvmemul_insn_fetch(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     /* Careful, as offset can wrap or truncate WRT insn_buf_eip. */
     uint8_t insn_off = offset - hvmemul_ctxt->insn_buf_eip;
+    paddr_t gpa;
+    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
+    unsigned long addr, reps = 1;
+    int rc;
+    struct hvm_emulate_ctxt old;
+
+    rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps,
+                                   hvm_access_insn_fetch, hvmemul_ctxt, &addr);
+    if ( rc == X86EMUL_EXCEPTION )
+    {
+        x86_emul_reset_event(ctxt);
+        rc = X86EMUL_OKAY;
+    }
+
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
+        pfec |= PFEC_user_mode;
+
+    old = *hvmemul_ctxt;
+    rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
+                                pfec, hvmemul_ctxt);
+    if ( rc == X86EMUL_EXCEPTION )
+    {
+        *hvmemul_ctxt = old;
+        rc = X86EMUL_OKAY;
+    }
 
+    if ( gpa && hvmemul_send_vm_event(gpa, addr, gaddr_to_gfn(gpa),
+                                      pfec, hvmemul_ctxt) )
+        return X86EMUL_ACCESS_EXCEPTION;
     /*
      * Fall back if requested bytes are not in the prefetch cache.
      * But always perform the (fake) read when bytes == 0.
@@ -1232,8 +1329,8 @@ int hvmemul_insn_fetch(
     if ( !bytes ||
          unlikely((insn_off + bytes) > hvmemul_ctxt->insn_buf_bytes) )
     {
-        int rc = __hvmemul_read(seg, offset, p_data, bytes,
-                                hvm_access_insn_fetch, hvmemul_ctxt);
+        rc = __hvmemul_read(seg, offset, p_data, bytes,
+                            hvm_access_insn_fetch, hvmemul_ctxt);
 
         if ( rc == X86EMUL_OKAY && bytes )
         {
@@ -2492,12 +2589,13 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
 }
 
 void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
-    unsigned int errcode)
+    unsigned int errcode, bool send_event)
 {
     struct hvm_emulate_ctxt ctx = {{ 0 }};
     int rc;
 
     hvm_emulate_init_once(&ctx, NULL, guest_cpu_user_regs());
+    ctx.send_event = send_event;
 
     switch ( kind )
     {
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 0df8ab40e6..bdc65da3ed 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -87,7 +87,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
             kind = EMUL_KIND_SET_CONTEXT_INSN;
 
         hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
-                                 X86_EVENT_NO_EC);
+                                 X86_EVENT_NO_EC, false);
 
         v->arch.vm_event->emulate_flags = 0;
     }
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 56c06a4fc6..536ad6367b 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -214,7 +214,8 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
          d->arch.monitor.inguest_pagefault_disabled &&
          npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
     {
-        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC);
+        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op,
+                                 X86_EVENT_NO_EC, true);
 
         return true;
     }
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 55a9e0ed51..a9829913a4 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -162,6 +162,7 @@ struct x86_emul_fpu_aux {
 #define X86EMUL_UNRECOGNIZED   X86EMUL_UNIMPLEMENTED
  /* (cmpxchg accessor): CMPXCHG failed. */
 #define X86EMUL_CMPXCHG_FAILED 7
+#define X86EMUL_ACCESS_EXCEPTION 8
 
 /* FPU sub-types which may be requested via ->get_fpu(). */
 enum x86_emulate_fpu_type {
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 26a01e83a4..721e175b04 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -47,6 +47,7 @@ struct hvm_emulate_ctxt {
     uint32_t intr_shadow;
 
     bool_t set_context;
+    bool send_event;
 };
 
 enum emul_kind {
@@ -63,7 +64,8 @@ int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
 void hvm_emulate_one_vm_event(enum emul_kind kind,
     unsigned int trapnr,
-    unsigned int errcode);
+    unsigned int errcode,
+    bool send_event);
 /* Must be called once to set up hvmemul state. */
 void hvm_emulate_init_once(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
-- 
2.17.1

* Re: [PATCH RFC v3 2/2] x86/emulate: Send vm_event from emulate
From: Jan Beulich @ 2019-04-08 15:32 UTC
  To: aisaila
  Cc: Tamas K Lengyel, Wei Liu, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Paul Durrant, xen-devel, Roger Pau Monne

>>> On 06.02.19 at 13:53, <aisaila@bitdefender.com> wrote:
> This patch aims to have mem access vm events sent from the emulator.
> This is useful in the case of page-walks that have to emulate
> instructions in access denied pages.

I'm afraid that I can't make sense of this: How could "page-walks
have to emulate instructions"? Instructions can (and actually will)
cause page walks to occur. And page walks hitting access denied
pages may trigger emulation of the insn having initiated the walk.

> We use hvmemul_map_linear_addr() to intercept r/w access and
> hvmemul_insn_fetch() to intercept exec access.
> 
> First we try to send a vm event and if the event is sent then emulation
> returns X86EMUL_ACCESS_EXCEPTION. If the event is not sent then the
> emulation goes on as expected.

The meaning of this new emulator return value needs explanation.
I notice its #define is also not accompanied by any comment. And
any addition of a new emulator return code should come with a
discussion of how existing users are affected. I'm not going to
exclude that indeed no other adjustments are necessary, but that's
far from obvious. You may recall that it had taken several iterations
to get the addition of X86EMUL_UNIMPLEMENTED right throughout
the code base.

Overall I guess I'm simply not deeply enough into vm-event to
be able to judge whether / how all of this makes sense.

> @@ -530,6 +532,55 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
>      return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
>  }
>  
> +static bool hvmemul_send_vm_event(paddr_t gpa, unsigned long gla, gfn_t gfn,
> +                                  uint32_t pfec, struct hvm_emulate_ctxt *ctxt)

Why both gpa and gfn?

> @@ -704,6 +756,23 @@ static void *hvmemul_map_linear_addr(
>  
>          if ( pfec & PFEC_write_access )
>          {
> +            unsigned long reps = 1;
> +            struct hvm_emulate_ctxt old;
> +            int rc = 0;
> +            paddr_t gpa;
> +
> +            old = *hvmemul_ctxt;
> +            rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
> +                                        pfec, hvmemul_ctxt);
> +            if ( rc == X86EMUL_EXCEPTION )
> +                *hvmemul_ctxt = old;

Something like this - if it is _really_ needed - has to be accompanied
by a justification. I find it questionable though that you really should
need to save/restore the entire context structure.

But I also don't understand why you need to re-do the translation
here: Just above of this hunk you've altered the call to
hvm_translate_get_page() to give you the gfn. And if there was
a reason to re-do it for the sending of the event, then it should
be restricted to that cases, i.e. un-monitored VMs should not be
impacted.
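
Something along these lines (a sketch only, reusing this series'
identifiers) would keep the un-monitored path untouched:

    /* Only pay for the extra translation when events may be sent. */
    if ( hvmemul_ctxt->send_event &&
         hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
                                pfec, hvmemul_ctxt) == X86EMUL_OKAY &&
         hvmemul_send_vm_event(gpa, addr, gfn, pfec, hvmemul_ctxt) )
    {
        err = ERR_PTR(~X86EMUL_ACCESS_EXCEPTION);
        goto out;
    }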

> @@ -1224,7 +1293,35 @@ int hvmemul_insn_fetch(
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>      /* Careful, as offset can wrap or truncate WRT insn_buf_eip. */
>      uint8_t insn_off = offset - hvmemul_ctxt->insn_buf_eip;
> +    paddr_t gpa;
> +    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
> +    unsigned long addr, reps = 1;
> +    int rc;
> +    struct hvm_emulate_ctxt old;
> +
> +    rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps,
> +                                   hvm_access_insn_fetch, hvmemul_ctxt, &addr);

The double translation is as problematic here, but what's worse:

> +    if ( rc == X86EMUL_EXCEPTION )
> +    {
> +        x86_emul_reset_event(ctxt);
> +        rc = X86EMUL_OKAY;
> +    }

You zap an error here, but ...

> +    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
> +        pfec |= PFEC_user_mode;
> +
> +    old = *hvmemul_ctxt;
> +    rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
> +                                pfec, hvmemul_ctxt);

... you happily use "addr" here anyway.

> +    if ( rc == X86EMUL_EXCEPTION )
> +    {
> +        *hvmemul_ctxt = old;
> +        rc = X86EMUL_OKAY;
> +    }
>  
> +    if ( gpa && hvmemul_send_vm_event(gpa, addr, gaddr_to_gfn(gpa),
> +                                      pfec, hvmemul_ctxt) )
> +        return X86EMUL_ACCESS_EXCEPTION;

Is there anything rendering gpa being zero an invalid / impossible
situation?

Jan

* Re: [PATCH RFC v3 2/2] x86/emulate: Send vm_event from emulate
From: Alexandru Stefan ISAILA @ 2019-05-07 11:45 UTC
  To: Jan Beulich
  Cc: Tamas K Lengyel, Wei Liu, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Paul Durrant, xen-devel, Roger Pau Monne



On 08.04.2019 18:32, Jan Beulich wrote:
>>>> On 06.02.19 at 13:53, <aisaila@bitdefender.com> wrote:
>> This patch aims to have mem access vm events sent from the emulator.
>> This is useful in the case of page-walks that have to emulate
>> instructions in access denied pages.
> 
> I'm afraid that I can't make sense of this: How could "page-walks
> have to emulate instructions"? Instructions can (and actually will)
> cause page walks to occur. And page walks hitting access denied
> pages may trigger emulation of the insn having initiated the walk.

I aimed for the idea that an emulated instruction could cause a 
page-walk that in the end hits protected pages. I will correct that part 
of the comment.

> 
>> We use hvmemul_map_linear_addr() to intercept r/w access and
>> hvmemul_insn_fetch() to intercept exec access.
>>
>> First we try to send a vm event and if the event is sent then emulation
>> returns X86EMUL_ACCESS_EXCEPTION. If the event is not sent then the
>> emulation goes on as expected.
> 
> The meaning of this new emulator return value needs explanation.
> I notice its #define is also not accompanied by any comment. And
> any addition of a new emulator return code should come with a
> discussion of how existing users are affected. I'm not going to
> exclude that indeed no other adjustments are necessary, but that's
> far from obvious. You may recall that it had taken several iterations
> to get the addition of X86EMUL_UNIMPLEMENTED right throughout
> the code base.

This new feature is activated by "bool send_event" when calling
hvm_emulate_one_vm_event(). Events will only be sent at specific moments,
and only when that is the intention.
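
For reference, the call site in this series that sets the flag, from the
mem_access.c hunk (the annotation is mine):

    hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op,
                             X86_EVENT_NO_EC, /* send_event */ true);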

> 
> Overall I guess I'm simply not deeply enough into vm-event to
> be able to judge whether / how all of this makes sense.
> 
>> @@ -530,6 +532,55 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
>>       return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
>>   }
>>   
>> +static bool hvmemul_send_vm_event(paddr_t gpa, unsigned long gla, gfn_t gfn,
>> +                                  uint32_t pfec, struct hvm_emulate_ctxt *ctxt)
> 
> Why both gpa and gfn?

If the gpa can be calculated from gfn then the code will be simplified.
Is this what you had in mind?

gpa = gfn_to_gaddr(gfn);

Alex

* Re: [PATCH RFC v3 2/2] x86/emulate: Send vm_event from emulate
From: Jan Beulich @ 2019-05-07 12:03 UTC
  To: aisaila
  Cc: Tamas K Lengyel, Wei Liu, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Paul Durrant, xen-devel, Roger Pau Monne

>>> On 07.05.19 at 13:45, <aisaila@bitdefender.com> wrote:
> On 08.04.2019 18:32, Jan Beulich wrote:
>>>>> On 06.02.19 at 13:53, <aisaila@bitdefender.com> wrote:
>>> First we try to send a vm event and if the event is sent then emulation
>>> returns X86EMUL_ACCESS_EXCEPTION. If the event is not sent then the
>>> emulation goes on as expected.
>> 
>> The meaning of this new emulator return value needs explanation.
>> I notice its #define is also not accompanied by any comment. And
>> any addition of a new emulator return code should come with a
>> discussion of how existing users are affected. I'm not going to
>> exclude that indeed no other adjustments are necessary, but that's
>> far from obvious. You may recall that it had taken several iterations
>> to get the addition of X86EMUL_UNIMPLEMENTED right throughout
>> the code base.
> 
> This new feature is activated by "bool send_event" when calling
> hvm_emulate_one_vm_event(). Events will only be sent at specific moments,
> and only when that is the intention.

That's understood. But the various emulation code paths _all_
need to be aware of this potential new return value nevertheless.
Even if some may not be reachable today in the specific case of
the new feature being active, they may become reachable eventually,
and no-one would notice the omission at that point.
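
For instance (an illustrative sketch, not actual Xen code), every consumer
dispatching on emulator return codes would need an explicit policy:

    switch ( rc )
    {
    case X86EMUL_OKAY:
        break;
    case X86EMUL_ACCESS_EXCEPTION: /* new: each consumer needs a policy */
        /* e.g. stop emulation and defer to the monitor */
        break;
    default: /* X86EMUL_UNHANDLEABLE, X86EMUL_RETRY, ... */
        break;
    }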

>>> @@ -530,6 +532,55 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
>>>       return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
>>>   }
>>>   
>>> +static bool hvmemul_send_vm_event(paddr_t gpa, unsigned long gla, gfn_t gfn,
>>> +                                  uint32_t pfec, struct hvm_emulate_ctxt *ctxt)
>> 
>> Why both gpa and gfn?
> 
> If the gpa can be calculated from gfn then the code will be simplified.
> Is this what you had in mind?
> 
> gpa = gfn_to_gaddr(gfn);

The other way around actually, as the calculation you suggest
discards bits (the low 12 ones).
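
A standalone illustration of the lossy direction (mocked types; Xen's
PAGE_SHIFT is 12):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t paddr_t;
    #define PAGE_SHIFT 12

    int main(void)
    {
        paddr_t gpa  = 0x12345678;        /* guest-physical address   */
        uint64_t gfn = gpa >> PAGE_SHIFT; /* gaddr_to_gfn(): lossless */
        paddr_t back = (paddr_t)gfn << PAGE_SHIFT; /* gfn_to_gaddr()  */

        /* back == 0x12345000: the low 12 bits (0x678) are discarded. */
        printf("gpa=%#llx gfn=%#llx gfn_to_gaddr(gfn)=%#llx\n",
               (unsigned long long)gpa, (unsigned long long)gfn,
               (unsigned long long)back);
        return 0;
    }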

Jan

* Re: [PATCH RFC v3 2/2] x86/emulate: Send vm_event from emulate
From: Alexandru Stefan ISAILA @ 2019-05-08 15:42 UTC
  To: Jan Beulich
  Cc: Tamas K Lengyel, Wei Liu, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Paul Durrant, xen-devel, Roger Pau Monne


>> @@ -530,6 +532,55 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
>>       return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
>>   }
>>   
>> +static bool hvmemul_send_vm_event(paddr_t gpa, unsigned long gla, gfn_t gfn,
>> +                                  uint32_t pfec, struct hvm_emulate_ctxt *ctxt)
> 
> Why both gpa and gfn?
> 
>> @@ -704,6 +756,23 @@ static void *hvmemul_map_linear_addr(
>>   
>>           if ( pfec & PFEC_write_access )
>>           {
>> +            unsigned long reps = 1;
>> +            struct hvm_emulate_ctxt old;
>> +            int rc = 0;
>> +            paddr_t gpa;
>> +
>> +            old = *hvmemul_ctxt;
>> +            rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
>> +                                        pfec, hvmemul_ctxt);
>> +            if ( rc == X86EMUL_EXCEPTION )
>> +                *hvmemul_ctxt = old;
> 
> Something like this - if it is _really_ needed - has to be accompanied
> by a justification. I find it questionable though that you really should
> need to save/restore the entire context structure.
> 
> But I also don't understand why you need to re-do the translation
> here: Just above of this hunk you've altered the call to
> hvm_translate_get_page() to give you the gfn. And if there was
> a reason to re-do it for the sending of the event, then it should
> be restricted to that cases, i.e. un-monitored VMs should not be
> impacted.

I will refactor the code so that hvmemul_linear_to_phys() is no longer
called here but rather in the send_event function.

> 
>> @@ -1224,7 +1293,35 @@ int hvmemul_insn_fetch(
>>           container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>>       /* Careful, as offset can wrap or truncate WRT insn_buf_eip. */
>>       uint8_t insn_off = offset - hvmemul_ctxt->insn_buf_eip;
>> +    paddr_t gpa;
>> +    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
>> +    unsigned long addr, reps = 1;
>> +    int rc;
>> +    struct hvm_emulate_ctxt old;
>> +
>> +    rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps,
>> +                                   hvm_access_insn_fetch, hvmemul_ctxt, &addr);
> 
> The double translation is as problematic here, but what's worse:
> 
>> +    if ( rc == X86EMUL_EXCEPTION )
>> +    {
>> +        x86_emul_reset_event(ctxt);
>> +        rc = X86EMUL_OKAY;
>> +    }
> 
> You zap an error here, but ...

In this case hvmemul_virtual_to_linear() can call
x86_emul_hw_exception() and then signal the caller to inject the
exception. I don't want to inject a non-user-segment exception here, and
so I leave the ctxt as it was before.

> 
>> +    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
>> +        pfec |= PFEC_user_mode;
>> +
>> +    old = *hvmemul_ctxt;
>> +    rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
>> +                                pfec, hvmemul_ctxt);
> 
> ... you happily use "addr" here anyway.

The address here is OK to be used, or maybe an if ( rc != X86EMUL_OKAY )
check can be put after getting the address.

> 
>> +    if ( rc == X86EMUL_EXCEPTION )
>> +    {
>> +        *hvmemul_ctxt = old;
>> +        rc = X86EMUL_OKAY;
>> +    }
>>   
>> +    if ( gpa && hvmemul_send_vm_event(gpa, addr, gaddr_to_gfn(gpa),
>> +                                      pfec, hvmemul_ctxt) )
>> +        return X86EMUL_ACCESS_EXCEPTION;
> 
> Is there anything rendering gpa being zero an invalid / impossible
> situation?

In tests I came across gpa == 0, so that is why the check was there. I
will have to test if this can be solved with the X86EMUL_OKAY check from
above.

Alex

* Re: [PATCH RFC v3 2/2] x86/emulate: Send vm_event from emulate
From: Jan Beulich @ 2019-05-09  9:06 UTC
  To: aisaila
  Cc: Tamas K Lengyel, Wei Liu, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Paul Durrant, xen-devel, Roger Pau Monne

>>> On 08.05.19 at 17:42, <aisaila@bitdefender.com> wrote:
>>> @@ -1224,7 +1293,35 @@ int hvmemul_insn_fetch(
>>>           container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>>>       /* Careful, as offset can wrap or truncate WRT insn_buf_eip. */
>>>       uint8_t insn_off = offset - hvmemul_ctxt->insn_buf_eip;
>>> +    paddr_t gpa;
>>> +    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
>>> +    unsigned long addr, reps = 1;
>>> +    int rc;
>>> +    struct hvm_emulate_ctxt old;
>>> +
>>> +    rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps,
>>> +                                   hvm_access_insn_fetch, hvmemul_ctxt, &addr);
>> 
>> The double translation is as problematic here, but what's worse:
>> 
>>> +    if ( rc == X86EMUL_EXCEPTION )
>>> +    {
>>> +        x86_emul_reset_event(ctxt);
>>> +        rc = X86EMUL_OKAY;
>>> +    }
>> 
>> You zap an error here, but ...
> 
> In this case hvmemul_virtual_to_linear() can call
> x86_emul_hw_exception() and then signal the caller to inject the
> exception. I don't want to inject a non-user-segment exception here, and
> so I leave the ctxt as it was before.
> 
>> 
>>> +    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
>>> +        pfec |= PFEC_user_mode;
>>> +
>>> +    old = *hvmemul_ctxt;
>>> +    rc = hvmemul_linear_to_phys(addr, &gpa, bytes, &reps,
>>> +                                pfec, hvmemul_ctxt);
>> 
>> ... you happily use "addr" here anyway.
> 
> The address here is OK to be used, or maybe an if ( rc != X86EMUL_OKAY )
> check can be put after getting the address.

addr is definitely not okay to be used if it wasn't initialized.
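
Concretely, a sketch of the obvious fix (untested):

    rc = hvmemul_virtual_to_linear(seg, offset, bytes, &reps,
                                   hvm_access_insn_fetch, hvmemul_ctxt,
                                   &addr);
    if ( rc != X86EMUL_OKAY )
        return rc; /* addr is only valid on success */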

>>> +    if ( rc == X86EMUL_EXCEPTION )
>>> +    {
>>> +        *hvmemul_ctxt = old;
>>> +        rc = X86EMUL_OKAY;
>>> +    }
>>>   
>>> +    if ( gpa && hvmemul_send_vm_event(gpa, addr, gaddr_to_gfn(gpa),
>>> +                                      pfec, hvmemul_ctxt) )
>>> +        return X86EMUL_ACCESS_EXCEPTION;
>> 
>> Is there anything rendering gpa being zero an invalid / impossible
>> situation?
> 
> In tests I came across gpa == 0, so that is why the check was there. I
> will have to test if this can be solved with the X86EMUL_OKAY check from
> above.

Even if you came across gpa 0 while testing, the solution to avoid
it (in case your possible solution turns out to not work) cannot be
to make it special in any way.

Jan