From: Alexandru Stefan ISAILA <aisaila@bitdefender.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "tamas@tklengyel.com" <tamas@tklengyel.com>,
	"wei.liu2@citrix.com" <wei.liu2@citrix.com>,
	"rcojocaru@bitdefender.com" <rcojocaru@bitdefender.com>,
	"george.dunlap@eu.citrix.com" <george.dunlap@eu.citrix.com>,
	"andrew.cooper3@citrix.com" <andrew.cooper3@citrix.com>,
	"paul.durrant@citrix.com" <paul.durrant@citrix.com>,
	"jbeulich@suse.com" <jbeulich@suse.com>,
	Alexandru Stefan ISAILA <aisaila@bitdefender.com>,
	"roger.pau@citrix.com" <roger.pau@citrix.com>
Subject: [PATCH v4 2/2] x86/emulate: Send vm_event from emulate
Date: Mon, 20 May 2019 12:55:17 +0000
Message-ID: <20190520125454.14805-2-aisaila@bitdefender.com>
In-Reply-To: <20190520125454.14805-1-aisaila@bitdefender.com>

This patch aims to have mem_access vm_events sent from the emulator.
This is useful for emulated instructions that cause page walks on
access-protected pages.

We use hvmemul_map_linear_addr() to intercept read/write accesses and
hvmemul_insn_fetch() to intercept execute accesses.

First we try to send a vm_event; if the event is sent, emulation
returns X86EMUL_ACCESS_EXCEPTION. If the event is not sent, emulation
continues as expected.
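
For reference, here is roughly what the receiving side sees. This is a
minimal sketch, not part of this patch, assuming a monitor application
with a vm_event ring already set up along the lines of
tools/tests/xen-access; the handler name is made up, but the reason,
field and flag names are the public vm_event ABI ones used below:

    /* Illustrative sketch only; build against the Xen public headers
     * with -D__XEN_TOOLS__.  Assumes the ring is already mapped and
     * requests are being pulled off it by the monitor application. */
    #include <stdio.h>
    #include <inttypes.h>
    #include <xen/vm_event.h>

    static void dump_mem_access_request(const vm_event_request_t *req)
    {
        if ( req->reason != VM_EVENT_REASON_MEM_ACCESS )
            return;

        /* gfn, gla and the in-page offset are filled in by
         * hvmemul_send_vm_event(), with MEM_ACCESS_GLA_VALID set. */
        printf("mem_access: gfn=%#" PRIx64 " gla=%#" PRIx64
               " offset=%#" PRIx64 " %c%c%c\n",
               req->u.mem_access.gfn,
               req->u.mem_access.gla,
               req->u.mem_access.offset,
               (req->u.mem_access.flags & MEM_ACCESS_R) ? 'r' : '-',
               (req->u.mem_access.flags & MEM_ACCESS_W) ? 'w' : '-',
               (req->u.mem_access.flags & MEM_ACCESS_X) ? 'x' : '-');
    }

How the introspection agent responds to the event remains its own
policy decision; the patch only ensures the request carries a valid
gla/gfn pair and the attempted access type.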

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V3:
	- Calculate gpa in hvmemul_send_vm_event()
	- Move hvmemul_linear_to_phys() call inside
	hvmemul_send_vm_event()
	- Check only if hvmemul_virtual_to_linear() returns X86EMUL_OKAY
	- Add comment for X86EMUL_ACCESS_EXCEPTION.
---
 xen/arch/x86/hvm/emulate.c             | 89 +++++++++++++++++++++++++-
 xen/arch/x86/hvm/vm_event.c            |  2 +-
 xen/arch/x86/mm/mem_access.c           |  3 +-
 xen/arch/x86/x86_emulate/x86_emulate.h |  2 +
 xen/include/asm-x86/hvm/emulate.h      |  4 +-
 5 files changed, 95 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 254ff6515d..75403ebc9b 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -15,6 +15,7 @@
 #include <xen/paging.h>
 #include <xen/trace.h>
 #include <xen/vm_event.h>
+#include <xen/monitor.h>
 #include <asm/event.h>
 #include <asm/i387.h>
 #include <asm/xstate.h>
@@ -26,6 +27,7 @@
 #include <asm/hvm/support.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/vm_event.h>
+#include <asm/altp2m.h>
 
 static void hvmtrace_io_assist(const ioreq_t *p)
 {
@@ -619,6 +621,68 @@ static int hvmemul_linear_to_phys(
     return X86EMUL_OKAY;
 }
 
+static bool hvmemul_send_vm_event(unsigned long gla,
+                                  uint32_t pfec, unsigned int bytes,
+                                  struct hvm_emulate_ctxt ctxt)
+{
+    xenmem_access_t access;
+    vm_event_request_t req = {};
+    gfn_t gfn;
+    paddr_t gpa;
+    unsigned long reps = 1;
+    int rc;
+
+    if ( !ctxt.send_event || !pfec )
+        return false;
+
+    rc = hvmemul_linear_to_phys(gla, &gpa, bytes, &reps, pfec, &ctxt);
+
+    if ( rc != X86EMUL_OKAY )
+        return false;
+
+    gfn = gaddr_to_gfn(gpa);
+
+    if ( p2m_get_mem_access(current->domain, gfn, &access,
+                            altp2m_vcpu_idx(current)) != 0 )
+        return false;
+
+    switch ( access ) {
+    case XENMEM_access_x:
+    case XENMEM_access_rx:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags = MEM_ACCESS_R | MEM_ACCESS_W;
+        break;
+
+    case XENMEM_access_w:
+    case XENMEM_access_rw:
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags = MEM_ACCESS_X;
+        break;
+
+    case XENMEM_access_r:
+    case XENMEM_access_n:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags |= MEM_ACCESS_X;
+        break;
+
+    default:
+        return false;
+    }
+
+    if ( !req.u.mem_access.flags )
+        return false; /* no violation */
+
+    req.reason = VM_EVENT_REASON_MEM_ACCESS;
+    req.u.mem_access.gfn = gfn_x(gfn);
+    req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA | MEM_ACCESS_GLA_VALID;
+    req.u.mem_access.gla = gla;
+    req.u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
+
+    return monitor_traps(current, true, &req) >= 0;
+}
+
 /*
  * Map the frame(s) covering an individual linear access, for writeable
  * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
@@ -636,6 +700,7 @@ static void *hvmemul_map_linear_addr(
     unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
         (linear >> PAGE_SHIFT) + 1;
     unsigned int i;
+    gfn_t gfn;
 
     /*
      * mfn points to the next free slot.  All used slots have a page reference
@@ -674,7 +739,7 @@ static void *hvmemul_map_linear_addr(
         ASSERT(mfn_x(*mfn) == 0);
 
         res = hvm_translate_get_page(curr, addr, true, pfec,
-                                     &pfinfo, &page, NULL, &p2mt);
+                                     &pfinfo, &page, &gfn, &p2mt);
 
         switch ( res )
         {
@@ -704,6 +769,11 @@ static void *hvmemul_map_linear_addr(
 
         if ( pfec & PFEC_write_access )
         {
+            if ( hvmemul_send_vm_event(addr, pfec, bytes, *hvmemul_ctxt) )
+            {
+                err = ERR_PTR(~X86EMUL_ACCESS_EXCEPTION);
+                goto out;
+            }
             if ( p2m_is_discard_write(p2mt) )
             {
                 err = ERR_PTR(~X86EMUL_OKAY);
@@ -1248,7 +1318,21 @@ int hvmemul_insn_fetch(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     /* Careful, as offset can wrap or truncate WRT insn_buf_eip. */
     uint8_t insn_off = offset - hvmemul_ctxt->insn_buf_eip;
+    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
+    unsigned long addr, reps = 1;
+    int rc = 0;
+
+    rc = hvmemul_virtual_to_linear(
+        seg, offset, bytes, &reps, hvm_access_insn_fetch, hvmemul_ctxt, &addr);
+
+    if ( rc != X86EMUL_OKAY || !bytes )
+        return rc;
+
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
+        pfec |= PFEC_user_mode;
 
+    if ( hvmemul_send_vm_event(addr, pfec, bytes, *hvmemul_ctxt) )
+        return X86EMUL_ACCESS_EXCEPTION;
     /*
      * Fall back if requested bytes are not in the prefetch cache.
      * But always perform the (fake) read when bytes == 0.
@@ -2508,12 +2592,13 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
 }
 
 void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
-    unsigned int errcode)
+    unsigned int errcode, bool send_event)
 {
     struct hvm_emulate_ctxt ctx = {{ 0 }};
     int rc;
 
     hvm_emulate_init_once(&ctx, NULL, guest_cpu_user_regs());
+    ctx.send_event = send_event;
 
     switch ( kind )
     {
diff --git a/xen/arch/x86/hvm/vm_event.c b/xen/arch/x86/hvm/vm_event.c
index 121de23071..6d203e8db5 100644
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -87,7 +87,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
             kind = EMUL_KIND_SET_CONTEXT_INSN;
 
         hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
-                                 X86_EVENT_NO_EC);
+                                 X86_EVENT_NO_EC, false);
 
         v->arch.vm_event->emulate_flags = 0;
     }
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 0144f92b98..c9972bab8c 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -214,7 +214,8 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
          d->arch.monitor.inguest_pagefault_disabled &&
          npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
     {
-        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC);
+        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op,
+                                 X86_EVENT_NO_EC, true);
 
         return true;
     }
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 08645762cc..8a20e733fa 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -162,6 +162,8 @@ struct x86_emul_fpu_aux {
 #define X86EMUL_UNRECOGNIZED   X86EMUL_UNIMPLEMENTED
  /* (cmpxchg accessor): CMPXCHG failed. */
 #define X86EMUL_CMPXCHG_FAILED 7
+/* Emulator tried to access a protected page. */
+#define X86EMUL_ACCESS_EXCEPTION 8
 
 /* FPU sub-types which may be requested via ->get_fpu(). */
 enum x86_emulate_fpu_type {
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index b39a1a0331..ed22ed0baf 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -47,6 +47,7 @@ struct hvm_emulate_ctxt {
     uint32_t intr_shadow;
 
     bool_t set_context;
+    bool send_event;
 };
 
 enum emul_kind {
@@ -63,7 +64,8 @@ int hvm_emulate_one(
     struct hvm_emulate_ctxt *hvmemul_ctxt);
 void hvm_emulate_one_vm_event(enum emul_kind kind,
     unsigned int trapnr,
-    unsigned int errcode);
+    unsigned int errcode,
+    bool send_event);
 /* Must be called once to set up hvmemul state. */
 void hvm_emulate_init_once(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
-- 
2.17.1

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Thread overview:

2019-05-20 12:55 [PATCH v4 1/2] x86/emulate: Move hvmemul_linear_to_phys Alexandru Stefan ISAILA
2019-05-20 12:55 ` [PATCH v4 2/2] x86/emulate: Send vm_event from emulate Alexandru Stefan ISAILA [this message]
2019-05-22  9:56   ` Jan Beulich
2019-05-22 12:59     ` Alexandru Stefan ISAILA
2019-05-22 13:34       ` Jan Beulich
2019-05-22 13:50         ` Alexandru Stefan ISAILA
2019-05-22 13:57           ` Jan Beulich
2019-05-30  8:59     ` Alexandru Stefan ISAILA
2019-05-31  9:16       ` Jan Beulich
2019-05-22 13:13 ` [PATCH v4 1/2] x86/emulate: Move hvmemul_linear_to_phys Paul Durrant
2019-05-22 13:55   ` George Dunlap
