All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/3] x86/mem_sharing: make fork_reset more configurable
@ 2022-04-27 15:34 Tamas K Lengyel
  2022-04-27 15:34 ` [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor Tamas K Lengyel
  2022-04-27 15:34 ` [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits Tamas K Lengyel
  0 siblings, 2 replies; 17+ messages in thread
From: Tamas K Lengyel @ 2022-04-27 15:34 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Jan Beulich, Andrew Cooper, Roger Pau Monné,
	Wei Liu, George Dunlap, Julien Grall, Stefano Stabellini,
	Tamas K Lengyel, Alexandru Isaila, Petre Pircalabu

Allow specifying distinct parts of the fork VM to be reset. This is useful when a
fuzzing operation involves mapping in only a handful of pages that are known
ahead of time. Throwing these pages away just to be re-copied immediately is
expensive, thus allowing to specify partial resets can speed things up.

Also allow resetting to be initiated from vm_event responses as an
optimization.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
---
v5: split toolstack part to separate patch and use ASSERT_UNREACHABLE
v4: No change
v3: Rebase on simpler approach after dropping empty_p2m feature
v2: address review comments and add more sanity checking
---
 xen/arch/x86/include/asm/mem_sharing.h |  9 +++++++++
 xen/arch/x86/mm/mem_sharing.c          | 24 +++++++++++++++++++-----
 xen/common/vm_event.c                  | 18 ++++++++++++++++++
 xen/include/public/memory.h            |  4 +++-
 xen/include/public/vm_event.h          |  8 ++++++++
 5 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/include/asm/mem_sharing.h b/xen/arch/x86/include/asm/mem_sharing.h
index cf7a12f4d2..2c00069bc9 100644
--- a/xen/arch/x86/include/asm/mem_sharing.h
+++ b/xen/arch/x86/include/asm/mem_sharing.h
@@ -85,6 +85,9 @@ static inline bool mem_sharing_is_fork(const struct domain *d)
 int mem_sharing_fork_page(struct domain *d, gfn_t gfn,
                           bool unsharing);
 
+int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                           bool reset_memory);
+
 /*
  * If called by a foreign domain, possible errors are
  *   -EBUSY -> ring full
@@ -148,6 +151,12 @@ static inline int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool lock)
     return -EOPNOTSUPP;
 }
 
+static inline int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                                         bool reset_memory)
+{
+    return -EOPNOTSUPP;
+}
+
 #endif
 
 #endif /* __MEM_SHARING_H__ */
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index a5c16b4429..1e1fb27c1a 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1881,15 +1881,21 @@ static int fork(struct domain *cd, struct domain *d)
  * footprints the hypercall continuation should be implemented (or if this
  * feature needs to be become "stable").
  */
-static int mem_sharing_fork_reset(struct domain *d)
+int mem_sharing_fork_reset(struct domain *d, bool reset_state,
+                           bool reset_memory)
 {
-    int rc;
+    int rc = 0;
     struct domain *pd = d->parent;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     struct page_info *page, *tmp;
 
+    ASSERT(reset_state || reset_memory);
+
     domain_pause(d);
 
+    if ( !reset_memory )
+        goto state;
+
     /* need recursive lock because we will free pages */
     spin_lock_recursive(&d->page_alloc_lock);
     page_list_for_each_safe(page, tmp, &d->page_list)
@@ -1922,7 +1928,9 @@ static int mem_sharing_fork_reset(struct domain *d)
     }
     spin_unlock_recursive(&d->page_alloc_lock);
 
-    rc = copy_settings(d, pd);
+ state:
+    if ( reset_state )
+        rc = copy_settings(d, pd);
 
     domain_unpause(d);
 
@@ -2229,15 +2237,21 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 
     case XENMEM_sharing_op_fork_reset:
     {
+        bool reset_state = mso.u.fork.flags & XENMEM_FORK_RESET_STATE;
+        bool reset_memory = mso.u.fork.flags & XENMEM_FORK_RESET_MEMORY;
+
         rc = -EINVAL;
-        if ( mso.u.fork.pad || mso.u.fork.flags )
+        if ( mso.u.fork.pad || (!reset_state && !reset_memory) )
+            goto out;
+        if ( mso.u.fork.flags &
+             ~(XENMEM_FORK_RESET_STATE | XENMEM_FORK_RESET_MEMORY) )
             goto out;
 
         rc = -ENOSYS;
         if ( !d->parent )
             goto out;
 
-        rc = mem_sharing_fork_reset(d);
+        rc = mem_sharing_fork_reset(d, reset_state, reset_memory);
         break;
     }
 
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 84cf52636b..cc7d8bf565 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -28,6 +28,11 @@
 #include <asm/p2m.h>
 #include <asm/monitor.h>
 #include <asm/vm_event.h>
+
+#ifdef CONFIG_MEM_SHARING
+#include <asm/mem_sharing.h>
+#endif
+
 #include <xsm/xsm.h>
 #include <public/hvm/params.h>
 
@@ -394,6 +399,19 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
             if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
                 p2m_mem_paging_resume(d, &rsp);
 #endif
+#ifdef CONFIG_MEM_SHARING
+            if ( mem_sharing_is_fork(d) )
+            {
+                bool reset_state = rsp.flags & VM_EVENT_FLAG_RESET_FORK_STATE;
+                bool reset_mem = rsp.flags & VM_EVENT_FLAG_RESET_FORK_MEMORY;
+
+                if ( (reset_state || reset_mem) &&
+                    mem_sharing_fork_reset(d, reset_state, reset_mem) )
+                {
+                    ASSERT_UNREACHABLE();
+                }
+            }
+#endif
 
             /*
              * Check emulation flags in the arch-specific handler only, as it
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index a1a0f0233a..f8d26fb77d 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -541,12 +541,14 @@ struct xen_mem_sharing_op {
                 uint32_t gref;     /* IN: gref to debug         */
             } u;
         } debug;
-        struct mem_sharing_op_fork {      /* OP_FORK */
+        struct mem_sharing_op_fork {      /* OP_FORK{,_RESET} */
             domid_t parent_domain;        /* IN: parent's domain id */
 /* Only makes sense for short-lived forks */
 #define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
 /* Only makes sense for short-lived forks */
 #define XENMEM_FORK_BLOCK_INTERRUPTS   (1u << 1)
+#define XENMEM_FORK_RESET_STATE        (1u << 2)
+#define XENMEM_FORK_RESET_MEMORY       (1u << 3)
             uint16_t flags;               /* IN: optional settings */
             uint32_t pad;                 /* Must be set to 0 */
         } fork;
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index bb003d21d0..1673bb8703 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -127,6 +127,14 @@
  * Reset the vmtrace buffer (if vmtrace is enabled)
  */
 #define VM_EVENT_FLAG_RESET_VMTRACE      (1 << 13)
+/*
+ * Reset the VM state (if VM is fork)
+ */
+#define VM_EVENT_FLAG_RESET_FORK_STATE   (1 << 14)
+/*
+ * Remove unshared entries from physmap (if VM is fork)
+ */
+#define VM_EVENT_FLAG_RESET_FORK_MEMORY  (1 << 15)
 
 /*
  * Reasons for the vm event request
-- 
2.34.1



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor
  2022-04-27 15:34 [PATCH 1/3] x86/mem_sharing: make fork_reset more configurable Tamas K Lengyel
@ 2022-04-27 15:34 ` Tamas K Lengyel
  2022-05-04 13:10   ` Tamas K Lengyel
  2022-05-05  8:17   ` Roger Pau Monné
  2022-04-27 15:34 ` [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits Tamas K Lengyel
  1 sibling, 2 replies; 17+ messages in thread
From: Tamas K Lengyel @ 2022-04-27 15:34 UTC (permalink / raw)
  To: xen-devel; +Cc: Tamas K Lengyel, Wei Liu, Anthony PERARD, Juergen Gross

Need to separately specify if the reset is for the memory or for the VM state,
or both.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
---
v5: split from the hypervisor-side patch
---
 tools/include/xenctrl.h     | 3 ++-
 tools/libs/ctrl/xc_memshr.c | 7 ++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
index 95bd5eca67..1b089a2c02 100644
--- a/tools/include/xenctrl.h
+++ b/tools/include/xenctrl.h
@@ -2290,7 +2290,8 @@ int xc_memshr_fork(xc_interface *xch,
  *
  * With VMs that have a lot of memory this call may block for a long time.
  */
-int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain);
+int xc_memshr_fork_reset(xc_interface *xch, uint32_t forked_domain,
+                         bool reset_state, bool reset_memory);
 
 /* Debug calls: return the number of pages referencing the shared frame backing
  * the input argument. Should be one or greater.
diff --git a/tools/libs/ctrl/xc_memshr.c b/tools/libs/ctrl/xc_memshr.c
index a6cfd7dccf..a0d0b894e2 100644
--- a/tools/libs/ctrl/xc_memshr.c
+++ b/tools/libs/ctrl/xc_memshr.c
@@ -257,12 +257,17 @@ int xc_memshr_fork(xc_interface *xch, uint32_t pdomid, uint32_t domid,
     return xc_memshr_memop(xch, domid, &mso);
 }
 
-int xc_memshr_fork_reset(xc_interface *xch, uint32_t domid)
+int xc_memshr_fork_reset(xc_interface *xch, uint32_t domid, bool reset_state,
+                         bool reset_memory)
 {
     xen_mem_sharing_op_t mso;
 
     memset(&mso, 0, sizeof(mso));
     mso.op = XENMEM_sharing_op_fork_reset;
+    if ( reset_state )
+        mso.u.fork.flags |= XENMEM_FORK_RESET_STATE;
+    if ( reset_memory )
+        mso.u.fork.flags |= XENMEM_FORK_RESET_MEMORY;
 
     return xc_memshr_memop(xch, domid, &mso);
 }
-- 
2.34.1



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits
  2022-04-27 15:34 [PATCH 1/3] x86/mem_sharing: make fork_reset more configurable Tamas K Lengyel
  2022-04-27 15:34 ` [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor Tamas K Lengyel
@ 2022-04-27 15:34 ` Tamas K Lengyel
  2022-04-28 13:55   ` Roger Pau Monné
  2022-05-04 13:12   ` Tamas K Lengyel
  1 sibling, 2 replies; 17+ messages in thread
From: Tamas K Lengyel @ 2022-04-27 15:34 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Wei Liu, Anthony PERARD, Juergen Gross,
	Andrew Cooper, George Dunlap, Jan Beulich, Julien Grall,
	Stefano Stabellini, Tamas K Lengyel, Alexandru Isaila,
	Petre Pircalabu, Roger Pau Monné,
	Jun Nakajima, Kevin Tian

Add monitor event that hooks the vmexit handler allowing for both sync and
async monitoring of events. With async monitoring an event is placed on the
monitor ring for each exit and the rest of the vmexit handler resumes normally.
If there are additional monitor events configured those will also place their
respective events on the monitor ring.

With the sync version an event is placed on the monitor ring but the handler
does not get resumed, thus the sync version is only useful when the VM is not
expected to resume normally after the vmexit. Our use-case is primarily with
the sync version with VM forks where the fork gets reset after sync vmexit
event, thus the rest of the vmexit handler can be safely skipped. This is
very useful when we want to avoid Xen crashing the VM under any circumstance,
for example during fuzzing. Collecting all vmexit information regardless of
the root cause makes it easier to reason about the state of the VM on the
monitor side, hence we opt to receive all events, even for external interrupt
and NMI exits and let the monitor agent decide how to proceed.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
---
v5: wrap vmexit fields in arch.vmx structures in the public vm_event ABI
---
 tools/include/xenctrl.h                |  2 ++
 tools/libs/ctrl/xc_monitor.c           | 15 +++++++++++++++
 xen/arch/x86/hvm/monitor.c             | 18 ++++++++++++++++++
 xen/arch/x86/hvm/vmx/vmx.c             | 12 ++++++++++++
 xen/arch/x86/include/asm/domain.h      |  2 ++
 xen/arch/x86/include/asm/hvm/monitor.h |  2 ++
 xen/arch/x86/include/asm/monitor.h     |  3 ++-
 xen/arch/x86/monitor.c                 | 14 ++++++++++++++
 xen/include/public/domctl.h            |  6 ++++++
 xen/include/public/vm_event.h          | 12 ++++++++++++
 10 files changed, 85 insertions(+), 1 deletion(-)

diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
index 1b089a2c02..159eaac050 100644
--- a/tools/include/xenctrl.h
+++ b/tools/include/xenctrl.h
@@ -2096,6 +2096,8 @@ int xc_monitor_privileged_call(xc_interface *xch, uint32_t domain_id,
                                bool enable);
 int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
                                   bool enable);
+int xc_monitor_vmexit(xc_interface *xch, uint32_t domain_id, bool enable,
+                      bool sync);
 /**
  * This function enables / disables emulation for each REP for a
  * REP-compatible instruction.
diff --git a/tools/libs/ctrl/xc_monitor.c b/tools/libs/ctrl/xc_monitor.c
index 4ac823e775..c5fa62ff30 100644
--- a/tools/libs/ctrl/xc_monitor.c
+++ b/tools/libs/ctrl/xc_monitor.c
@@ -246,6 +246,21 @@ int xc_monitor_emul_unimplemented(xc_interface *xch, uint32_t domain_id,
     return do_domctl(xch, &domctl);
 }
 
+int xc_monitor_vmexit(xc_interface *xch, uint32_t domain_id, bool enable,
+                      bool sync)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_VMEXIT;
+    domctl.u.monitor_op.u.vmexit.sync = sync;
+
+    return do_domctl(xch, &domctl);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index b44a1e1dfe..a11cd76f4d 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -328,6 +328,24 @@ bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,
     return monitor_traps(curr, true, &req) >= 0;
 }
 
+int hvm_monitor_vmexit(unsigned long exit_reason,
+                       unsigned long exit_qualification)
+{
+    struct vcpu *curr = current;
+    struct arch_domain *ad = &curr->domain->arch;
+    vm_event_request_t req = {};
+
+    ASSERT(ad->monitor.vmexit_enabled);
+
+    req.reason = VM_EVENT_REASON_VMEXIT;
+    req.u.vmexit.arch.vmx.reason = exit_reason;
+    req.u.vmexit.arch.vmx.qualification = exit_qualification;
+
+    set_npt_base(curr, &req);
+
+    return monitor_traps(curr, ad->monitor.vmexit_sync, &req);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cc8c4e9f04..4320270aae 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4008,6 +4008,18 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
         }
     }
 
+    if ( unlikely(currd->arch.monitor.vmexit_enabled) )
+    {
+        int rc;
+
+        __vmread(EXIT_QUALIFICATION, &exit_qualification);
+        rc = hvm_monitor_vmexit(exit_reason, exit_qualification);
+        if ( rc < 0 )
+            goto exit_and_crash;
+        if ( rc )
+            return;
+    }
+
     /* XXX: This looks ugly, but we need a mechanism to ensure
      * any pending vmresume has really happened
      */
diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
index 35898d725f..3aa0919fa6 100644
--- a/xen/arch/x86/include/asm/domain.h
+++ b/xen/arch/x86/include/asm/domain.h
@@ -430,6 +430,8 @@ struct arch_domain
          */
         unsigned int inguest_pagefault_disabled                            : 1;
         unsigned int control_register_values                               : 1;
+        unsigned int vmexit_enabled                                        : 1;
+        unsigned int vmexit_sync                                           : 1;
         struct monitor_msr_bitmap *msr_bitmap;
         uint64_t write_ctrlreg_mask[4];
     } monitor;
diff --git a/xen/arch/x86/include/asm/hvm/monitor.h b/xen/arch/x86/include/asm/hvm/monitor.h
index a75cd8545c..639f6dfa37 100644
--- a/xen/arch/x86/include/asm/hvm/monitor.h
+++ b/xen/arch/x86/include/asm/hvm/monitor.h
@@ -51,6 +51,8 @@ bool hvm_monitor_emul_unimplemented(void);
 
 bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,
                            uint16_t kind);
+int hvm_monitor_vmexit(unsigned long exit_reason,
+                       unsigned long exit_qualification);
 
 #endif /* __ASM_X86_HVM_MONITOR_H__ */
 
diff --git a/xen/arch/x86/include/asm/monitor.h b/xen/arch/x86/include/asm/monitor.h
index 01c6d63bb9..d8d54c5f23 100644
--- a/xen/arch/x86/include/asm/monitor.h
+++ b/xen/arch/x86/include/asm/monitor.h
@@ -89,7 +89,8 @@ static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
                     (1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
                     (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) |
                     (1U << XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED) |
-                    (1U << XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT));
+                    (1U << XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT) |
+                    (1U << XEN_DOMCTL_MONITOR_EVENT_VMEXIT));
 
     if ( hvm_is_singlestep_supported() )
         capabilities |= (1U << XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP);
diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
index 3079726a8b..30ca71432c 100644
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -332,6 +332,20 @@ int arch_monitor_domctl_event(struct domain *d,
         break;
     }
 
+    case XEN_DOMCTL_MONITOR_EVENT_VMEXIT:
+    {
+        bool old_status = ad->monitor.vmexit_enabled;
+
+        if ( unlikely(old_status == requested_status) )
+            return -EEXIST;
+
+        domain_pause(d);
+        ad->monitor.vmexit_enabled = requested_status;
+        ad->monitor.vmexit_sync = mop->u.vmexit.sync;
+        domain_unpause(d);
+        break;
+    }
+
     default:
         /*
          * Should not be reached unless arch_monitor_get_capabilities() is
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index b85e6170b0..4803ed7afc 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1057,6 +1057,7 @@ struct xen_domctl_psr_cmt_op {
 #define XEN_DOMCTL_MONITOR_EVENT_EMUL_UNIMPLEMENTED    10
 /* Enabled by default */
 #define XEN_DOMCTL_MONITOR_EVENT_INGUEST_PAGEFAULT     11
+#define XEN_DOMCTL_MONITOR_EVENT_VMEXIT                12
 
 struct xen_domctl_monitor_op {
     uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
@@ -1107,6 +1108,11 @@ struct xen_domctl_monitor_op {
             /* Pause vCPU until response */
             uint8_t sync;
         } debug_exception;
+
+        struct {
+            /* Send event and don't process vmexit */
+            uint8_t sync;
+        } vmexit;
     } u;
 };
 
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 1673bb8703..56b429a975 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -175,6 +175,8 @@
 #define VM_EVENT_REASON_DESCRIPTOR_ACCESS       13
 /* Current instruction is not implemented by the emulator */
 #define VM_EVENT_REASON_EMUL_UNIMPLEMENTED      14
+/* VMEXIT */
+#define VM_EVENT_REASON_VMEXIT                  15
 
 /* Supported values for the vm_event_write_ctrlreg index. */
 #define VM_EVENT_X86_CR0    0
@@ -394,6 +396,15 @@ struct vm_event_emul_insn_data {
     uint8_t data[16]; /* Has to be completely filled */
 };
 
+struct vm_event_vmexit {
+    struct {
+        struct {
+            uint64_t reason;
+            uint64_t qualification;
+        } vmx;
+    } arch;
+};
+
 typedef struct vm_event_st {
     uint32_t version;   /* VM_EVENT_INTERFACE_VERSION */
     uint32_t flags;     /* VM_EVENT_FLAG_* */
@@ -414,6 +425,7 @@ typedef struct vm_event_st {
         struct vm_event_debug                 software_breakpoint;
         struct vm_event_debug                 debug_exception;
         struct vm_event_cpuid                 cpuid;
+        struct vm_event_vmexit                vmexit;
         union {
             struct vm_event_interrupt_x86     x86;
         } interrupt;
-- 
2.34.1



^ permalink raw reply related	[flat|nested] 17+ messages in thread

* Re: [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits
  2022-04-27 15:34 ` [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits Tamas K Lengyel
@ 2022-04-28 13:55   ` Roger Pau Monné
  2022-04-28 14:02     ` Tamas K Lengyel
  2022-05-04 13:12   ` Tamas K Lengyel
  1 sibling, 1 reply; 17+ messages in thread
From: Roger Pau Monné @ 2022-04-28 13:55 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: xen-devel, Wei Liu, Anthony PERARD, Juergen Gross, Andrew Cooper,
	George Dunlap, Jan Beulich, Julien Grall, Stefano Stabellini,
	Tamas K Lengyel, Alexandru Isaila, Petre Pircalabu, Jun Nakajima,
	Kevin Tian

On Wed, Apr 27, 2022 at 11:34:20AM -0400, Tamas K Lengyel wrote:
> Add monitor event that hooks the vmexit handler allowing for both sync and
> async monitoring of events. With async monitoring an event is placed on the
> monitor ring for each exit and the rest of the vmexit handler resumes normally.
> If there are additional monitor events configured those will also place their
> respective events on the monitor ring.
> 
> With the sync version an event is placed on the monitor ring but the handler
> does not get resumed, thus the sync version is only useful when the VM is not
> expected to resume normally after the vmexit. Our use-case is primarily with
> the sync version with VM forks where the fork gets reset after sync vmexit
> event, thus the rest of the vmexit handler can be safely skipped. This is
> very useful when we want to avoid Xen crashing the VM under any circumstance,
> for example during fuzzing. Collecting all vmexit information regardless of
> the root cause makes it easier to reason about the state of the VM on the
> monitor side, hence we opt to receive all events, even for external interrupt
> and NMI exits and let the monitor agent decide how to proceed.
> 
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks, Roger.


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits
  2022-04-28 13:55   ` Roger Pau Monné
@ 2022-04-28 14:02     ` Tamas K Lengyel
  0 siblings, 0 replies; 17+ messages in thread
From: Tamas K Lengyel @ 2022-04-28 14:02 UTC (permalink / raw)
  To: Roger Pau Monné
  Cc: Tamas K Lengyel, xen-devel, Wei Liu, Anthony PERARD,
	Juergen Gross, Andrew Cooper, George Dunlap, Jan Beulich,
	Julien Grall, Stefano Stabellini, Alexandru Isaila,
	Petre Pircalabu, Jun Nakajima, Kevin Tian

On Thu, Apr 28, 2022 at 9:56 AM Roger Pau Monné <roger.pau@citrix.com> wrote:
>
> On Wed, Apr 27, 2022 at 11:34:20AM -0400, Tamas K Lengyel wrote:
> > Add monitor event that hooks the vmexit handler allowing for both sync and
> > async monitoring of events. With async monitoring an event is placed on the
> > monitor ring for each exit and the rest of the vmexit handler resumes normally.
> > If there are additional monitor events configured those will also place their
> > respective events on the monitor ring.
> >
> > With the sync version an event is placed on the monitor ring but the handler
> > does not get resumed, thus the sync version is only useful when the VM is not
> > expected to resume normally after the vmexit. Our use-case is primarily with
> > the sync version with VM forks where the fork gets reset after sync vmexit
> > event, thus the rest of the vmexit handler can be safely skipped. This is
> > very useful when we want to avoid Xen crashing the VM under any circumstance,
> > for example during fuzzing. Collecting all vmexit information regardless of
> > the root cause makes it easier to reason about the state of the VM on the
> > monitor side, hence we opt to receive all events, even for external interrupt
> > and NMI exits and let the monitor agent decide how to proceed.
> >
> > Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
>
> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
>
> Thanks, Roger.

Thank you!
Tamas


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor
  2022-04-27 15:34 ` [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor Tamas K Lengyel
@ 2022-05-04 13:10   ` Tamas K Lengyel
  2022-05-05  8:17   ` Roger Pau Monné
  1 sibling, 0 replies; 17+ messages in thread
From: Tamas K Lengyel @ 2022-05-04 13:10 UTC (permalink / raw)
  To: Tamas K Lengyel; +Cc: xen-devel, Wei Liu, Anthony PERARD, Juergen Gross

On Wed, Apr 27, 2022 at 11:52 AM Tamas K Lengyel
<tamas.lengyel@intel.com> wrote:
>
> Need to separately specify if the reset is for the memory or for the VM state,
> or both.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
> ---
> v5: split from the hypervisor-side patch

Patch ping. Could a toolstack maintainer please take a look at this?
The hypervisor side is already merged.

Thanks,
Tamas


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits
  2022-04-27 15:34 ` [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits Tamas K Lengyel
  2022-04-28 13:55   ` Roger Pau Monné
@ 2022-05-04 13:12   ` Tamas K Lengyel
  2022-05-12 13:47     ` Tamas K Lengyel
  1 sibling, 1 reply; 17+ messages in thread
From: Tamas K Lengyel @ 2022-05-04 13:12 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: xen-devel, Wei Liu, Anthony PERARD, Juergen Gross, Andrew Cooper,
	George Dunlap, Jan Beulich, Julien Grall, Stefano Stabellini,
	Alexandru Isaila, Petre Pircalabu, Roger Pau Monné,
	Jun Nakajima, Kevin Tian

On Wed, Apr 27, 2022 at 11:51 AM Tamas K Lengyel
<tamas.lengyel@intel.com> wrote:
>
> Add monitor event that hooks the vmexit handler allowing for both sync and
> async monitoring of events. With async monitoring an event is placed on the
> monitor ring for each exit and the rest of the vmexit handler resumes normally.
> If there are additional monitor events configured those will also place their
> respective events on the monitor ring.
>
> With the sync version an event is placed on the monitor ring but the handler
> does not get resumed, thus the sync version is only useful when the VM is not
> expected to resume normally after the vmexit. Our use-case is primarily with
> the sync version with VM forks where the fork gets reset after sync vmexit
> event, thus the rest of the vmexit handler can be safely skipped. This is
> very useful when we want to avoid Xen crashing the VM under any circumstance,
> for example during fuzzing. Collecting all vmexit information regardless of
> the root cause makes it easier to reason about the state of the VM on the
> monitor side, hence we opt to receive all events, even for external interrupt
> and NMI exits and let the monitor agent decide how to proceed.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
> ---
> v5: wrap vmexit fields in arch.vmx structures in the public vm_event ABI

Patch ping. Could a toolstack maintainer please take a look at this?
The hypervisor side already has a Reviewed-by.

Thanks,
Tamas


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor
  2022-04-27 15:34 ` [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor Tamas K Lengyel
  2022-05-04 13:10   ` Tamas K Lengyel
@ 2022-05-05  8:17   ` Roger Pau Monné
  2022-05-12 13:46     ` Tamas K Lengyel
  1 sibling, 1 reply; 17+ messages in thread
From: Roger Pau Monné @ 2022-05-05  8:17 UTC (permalink / raw)
  To: Tamas K Lengyel; +Cc: xen-devel, Wei Liu, Anthony PERARD, Juergen Gross

On Wed, Apr 27, 2022 at 11:34:19AM -0400, Tamas K Lengyel wrote:
> Need to separately specify if the reset is for the memory or for the VM state,
> or both.
> 
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks.


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor
  2022-05-05  8:17   ` Roger Pau Monné
@ 2022-05-12 13:46     ` Tamas K Lengyel
  2022-05-18 15:01       ` Tamas K Lengyel
  0 siblings, 1 reply; 17+ messages in thread
From: Tamas K Lengyel @ 2022-05-12 13:46 UTC (permalink / raw)
  To: Roger Pau Monné
  Cc: Tamas K Lengyel, xen-devel, Wei Liu, Anthony PERARD, Juergen Gross

On Thu, May 5, 2022 at 4:27 AM Roger Pau Monné <roger.pau@citrix.com> wrote:
>
> On Wed, Apr 27, 2022 at 11:34:19AM -0400, Tamas K Lengyel wrote:
> > Need to separately specify if the reset is for the memory or for the VM state,
> > or both.
> >
> > Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
>
> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Patch ping. Can this patch be merged please?

Thanks,
Tamas


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits
  2022-05-04 13:12   ` Tamas K Lengyel
@ 2022-05-12 13:47     ` Tamas K Lengyel
  2022-05-18 15:02       ` Tamas K Lengyel
  0 siblings, 1 reply; 17+ messages in thread
From: Tamas K Lengyel @ 2022-05-12 13:47 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: xen-devel, Wei Liu, Anthony PERARD, Juergen Gross, Andrew Cooper,
	George Dunlap, Jan Beulich, Julien Grall, Stefano Stabellini,
	Alexandru Isaila, Petre Pircalabu, Roger Pau Monné,
	Jun Nakajima, Kevin Tian

On Wed, May 4, 2022 at 9:12 AM Tamas K Lengyel <tamas@tklengyel.com> wrote:
>
> On Wed, Apr 27, 2022 at 11:51 AM Tamas K Lengyel
> <tamas.lengyel@intel.com> wrote:
> >
> > Add monitor event that hooks the vmexit handler allowing for both sync and
> > async monitoring of events. With async monitoring an event is placed on the
> > monitor ring for each exit and the rest of the vmexit handler resumes normally.
> > If there are additional monitor events configured those will also place their
> > respective events on the monitor ring.
> >
> > With the sync version an event is placed on the monitor ring but the handler
> > does not get resumed, thus the sync version is only useful when the VM is not
> > expected to resume normally after the vmexit. Our use-case is primarily with
> > the sync version with VM forks where the fork gets reset after sync vmexit
> > event, thus the rest of the vmexit handler can be safely skipped. This is
> > very useful when we want to avoid Xen crashing the VM under any circumstance,
> > for example during fuzzing. Collecting all vmexit information regardless of
> > the root cause makes it easier to reason about the state of the VM on the
> > monitor side, hence we opt to receive all events, even for external interrupt
> > and NMI exits and let the monitor agent decide how to proceed.
> >
> > Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
> > ---
> > v5: wrap vmexit fields in arch.vmx structures in the public vm_event ABI
>
> Patch ping. Could a toolstack maintainer please take a look at this?
> The hypervisor side already has a Reviewed-by.

Patch ping.

Tamas


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor
  2022-05-12 13:46     ` Tamas K Lengyel
@ 2022-05-18 15:01       ` Tamas K Lengyel
  2022-05-18 15:48         ` Jan Beulich
  0 siblings, 1 reply; 17+ messages in thread
From: Tamas K Lengyel @ 2022-05-18 15:01 UTC (permalink / raw)
  To: Roger Pau Monné
  Cc: Tamas K Lengyel, xen-devel, Wei Liu, Anthony PERARD, Juergen Gross

On Thu, May 12, 2022 at 9:46 AM Tamas K Lengyel
<tamas.k.lengyel@gmail.com> wrote:
>
> On Thu, May 5, 2022 at 4:27 AM Roger Pau Monné <roger.pau@citrix.com> wrote:
> >
> > On Wed, Apr 27, 2022 at 11:34:19AM -0400, Tamas K Lengyel wrote:
> > > Need to separately specify if the reset is for the memory or for the VM state,
> > > or both.
> > >
> > > Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
> >
> > Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
>
> Patch ping. Can this patch be merged please?

Patch ping.

Tamas


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits
  2022-05-12 13:47     ` Tamas K Lengyel
@ 2022-05-18 15:02       ` Tamas K Lengyel
  2022-05-20  0:35         ` Tian, Kevin
  0 siblings, 1 reply; 17+ messages in thread
From: Tamas K Lengyel @ 2022-05-18 15:02 UTC (permalink / raw)
  To: xen-devel
  Cc: Tamas K Lengyel, Wei Liu, Anthony PERARD, Juergen Gross,
	Andrew Cooper, George Dunlap, Jan Beulich, Julien Grall,
	Stefano Stabellini, Alexandru Isaila, Petre Pircalabu,
	Roger Pau Monné,
	Jun Nakajima, Kevin Tian

On Thu, May 12, 2022 at 9:47 AM Tamas K Lengyel <tamas@tklengyel.com> wrote:
>
> On Wed, May 4, 2022 at 9:12 AM Tamas K Lengyel <tamas@tklengyel.com> wrote:
> >
> > On Wed, Apr 27, 2022 at 11:51 AM Tamas K Lengyel
> > <tamas.lengyel@intel.com> wrote:
> > >
> > > Add monitor event that hooks the vmexit handler allowing for both sync and
> > > async monitoring of events. With async monitoring an event is placed on the
> > > monitor ring for each exit and the rest of the vmexit handler resumes normally.
> > > If there are additional monitor events configured those will also place their
> > > respective events on the monitor ring.
> > >
> > > With the sync version an event is placed on the monitor ring but the handler
> > > does not get resumed, thus the sync version is only useful when the VM is not
> > > expected to resume normally after the vmexit. Our use-case is primarily with
> > > the sync version with VM forks where the fork gets reset after sync vmexit
> > > event, thus the rest of the vmexit handler can be safely skipped. This is
> > > very useful when we want to avoid Xen crashing the VM under any circumstance,
> > > for example during fuzzing. Collecting all vmexit information regardless of
> > > the root cause makes it easier to reason about the state of the VM on the
> > > monitor side, hence we opt to receive all events, even for external interrupt
> > > and NMI exits and let the monitor agent decide how to proceed.
> > >
> > > Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
> > > ---
> > > v5: wrap vmexit fields in arch.vmx structures in the public vm_event ABI
> >
> > Patch ping. Could a toolstack maintainer please take a look at this?
> > The hypervisor side already has a Reviewed-by.
>
> Patch ping.

Patch ping.

Tamas


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor
  2022-05-18 15:01       ` Tamas K Lengyel
@ 2022-05-18 15:48         ` Jan Beulich
  2022-05-18 17:03           ` Tamas K Lengyel
  0 siblings, 1 reply; 17+ messages in thread
From: Jan Beulich @ 2022-05-18 15:48 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Tamas K Lengyel, xen-devel, Wei Liu, Anthony PERARD,
	Juergen Gross, Roger Pau Monné

On 18.05.2022 17:01, Tamas K Lengyel wrote:
> On Thu, May 12, 2022 at 9:46 AM Tamas K Lengyel
> <tamas.k.lengyel@gmail.com> wrote:
>>
>> On Thu, May 5, 2022 at 4:27 AM Roger Pau Monné <roger.pau@citrix.com> wrote:
>>>
>>> On Wed, Apr 27, 2022 at 11:34:19AM -0400, Tamas K Lengyel wrote:
>>>> Need to separately specify if the reset is for the memory or for the VM state,
>>>> or both.
>>>>
>>>> Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
>>>
>>> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
>>
>> Patch ping. Can this patch be merged please?
> 
> Patch ping.

Your mail (and I guess also your earlier one) was _To_ Roger, which
is odd since he already did provide R-b. What you're missing is a
tool stack maintainer ack aiui, so it may help if you send your
pings _To_ the respective people.

Jan



^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor
  2022-05-18 15:48         ` Jan Beulich
@ 2022-05-18 17:03           ` Tamas K Lengyel
  2022-05-19  6:03             ` Jan Beulich
  0 siblings, 1 reply; 17+ messages in thread
From: Tamas K Lengyel @ 2022-05-18 17:03 UTC (permalink / raw)
  To: Jan Beulich, xen-devel, Juergen Gross, Anthony PERARD, Wei Liu
  Cc: Roger Pau Monné

On Wed, May 18, 2022 at 11:48 AM Jan Beulich <jbeulich@suse.com> wrote:
>
> On 18.05.2022 17:01, Tamas K Lengyel wrote:
> > On Thu, May 12, 2022 at 9:46 AM Tamas K Lengyel
> > <tamas.k.lengyel@gmail.com> wrote:
> >>
> >> On Thu, May 5, 2022 at 4:27 AM Roger Pau Monné <roger.pau@citrix.com> wrote:
> >>>
> >>> On Wed, Apr 27, 2022 at 11:34:19AM -0400, Tamas K Lengyel wrote:
> >>>> Need to separately specify if the reset is for the memory or for the VM state,
> >>>> or both.
> >>>>
> >>>> Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
> >>>
> >>> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
> >>
> >> Patch ping. Can this patch be merged please?
> >
> > Patch ping.
>
> Your mail (and I guess also your earlier one) was _To_ Roger, which
> is odd since he already did provide R-b. What you're missing is a
> tool stack maintainer ack aiui, so it may help if you send your
> pings _To_ the respective people.

True, but all the toolstack maintainers have been CC-d from the start.
Is it the case that CC-ing is now officially insufficient? What's the
point of ./scripts/add_maintainers.pl then which specifically adds
maintainers only as CC? How are you supposed to get their attention?
Just know you specifically have to send emails to them and not the
mailinglist? I'm getting the distinct impression that the toolstack
side has simply become unmaintained/orphaned with no one left who
actually is looking at the mailinglist.

Tamas


^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor
  2022-05-18 17:03           ` Tamas K Lengyel
@ 2022-05-19  6:03             ` Jan Beulich
  0 siblings, 0 replies; 17+ messages in thread
From: Jan Beulich @ 2022-05-19  6:03 UTC (permalink / raw)
  To: Tamas K Lengyel
  Cc: Roger Pau Monné, xen-devel, Juergen Gross, Anthony PERARD, Wei Liu

On 18.05.2022 19:03, Tamas K Lengyel wrote:
> On Wed, May 18, 2022 at 11:48 AM Jan Beulich <jbeulich@suse.com> wrote:
>>
>> On 18.05.2022 17:01, Tamas K Lengyel wrote:
>>> On Thu, May 12, 2022 at 9:46 AM Tamas K Lengyel
>>> <tamas.k.lengyel@gmail.com> wrote:
>>>>
>>>> On Thu, May 5, 2022 at 4:27 AM Roger Pau Monné <roger.pau@citrix.com> wrote:
>>>>>
>>>>> On Wed, Apr 27, 2022 at 11:34:19AM -0400, Tamas K Lengyel wrote:
>>>>>> Need to separately specify if the reset is for the memory or for the VM state,
>>>>>> or both.
>>>>>>
>>>>>> Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
>>>>>
>>>>> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
>>>>
>>>> Patch ping. Can this patch be merged please?
>>>
>>> Patch ping.
>>
>> Your mail (and I guess also your earlier one) was _To_ Roger, which
>> is odd since he already did provide R-b. What you're missing is a
>> tool stack maintainer ack aiui, so it may help if you send your
>> pings _To_ the respective people.
> 
> True, but all the toolstack maintainers have been CC-d from the start.
> Is it the case that CC-ing is now officially insufficient?

No - patch submissions should still only Cc maintainers. But I think
pings, especially repeated ones, would better go To the respective
people. (And this follows my general remark I keep making every once
in a while: There's a reason there is both To and Cc, and using them
appropriately can help. Of course there's no guarantee, as people
may not pay attention at all.)

> What's the
> point of ./scripts/add_maintainers.pl then which specifically adds
> maintainers only as CC? How are you supposed to get their attention?
> Just know you specifically have to send emails to them and not the
> mailinglist? I'm getting the distinct impression that the toolstack
> side has simply become unmaintained/orphaned with no one left who
> actually is looking at the mailinglist.

While things are far from ideal (and as you likely know we're still
looking for a 2nd tool stack maintainer), I have actually got the
impression that things have improved a little lately.

Jan



^ permalink raw reply	[flat|nested] 17+ messages in thread

* RE: [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits
  2022-05-18 15:02       ` Tamas K Lengyel
@ 2022-05-20  0:35         ` Tian, Kevin
  2022-05-20 14:05           ` Lengyel, Tamas
  0 siblings, 1 reply; 17+ messages in thread
From: Tian, Kevin @ 2022-05-20  0:35 UTC (permalink / raw)
  To: Tamas K Lengyel, xen-devel
  Cc: Lengyel, Tamas, Wei Liu, Anthony PERARD, Gross, Jurgen, Cooper,
	Andrew, George Dunlap, Beulich, Jan, Julien Grall,
	Stefano Stabellini, Alexandru Isaila, Petre Pircalabu,
	Pau Monné,
	Roger, Nakajima, Jun

> From: Tamas K Lengyel <tamas@tklengyel.com>
> Sent: Wednesday, May 18, 2022 11:02 PM
> 
> On Thu, May 12, 2022 at 9:47 AM Tamas K Lengyel <tamas@tklengyel.com>
> wrote:
> >
> > On Wed, May 4, 2022 at 9:12 AM Tamas K Lengyel <tamas@tklengyel.com>
> wrote:
> > >
> > > On Wed, Apr 27, 2022 at 11:51 AM Tamas K Lengyel
> > > <tamas.lengyel@intel.com> wrote:
> > > >
> > > > Add monitor event that hooks the vmexit handler allowing for both sync
> and
> > > > async monitoring of events. With async monitoring an event is placed
> on the
> > > > monitor ring for each exit and the rest of the vmexit handler resumes
> normally.
> > > > If there are additional monitor events configured those will also place
> their
> > > > respective events on the monitor ring.
> > > >
> > > > With the sync version an event is placed on the monitor ring but the
> handler
> > > > does not get resumed, thus the sync version is only useful when the VM
> is not
> > > > expected to resume normally after the vmexit. Our use-case is primarily
> with
> > > > the sync version with VM forks where the fork gets reset after sync
> vmexit
> > > > event, thus the rest of the vmexit handler can be safely skipped. This is
> > > > very useful when we want to avoid Xen crashing the VM under any
> circumstance,
> > > > for example during fuzzing. Collecting all vmexit information regardless
> of
> > > > the root cause makes it easier to reason about the state of the VM on
> the
> > > > monitor side, hence we opt to receive all events, even for external
> interrupt
> > > > and NMI exits and let the monitor agent decide how to proceed.
> > > >
> > > > Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
> > > > ---
> > > > v5: wrap vmexit fields in arch.vmx structures in the public vm_event ABI
> > >
> > > Patch ping. Could a toolstack maintainer please take a look at this?
> > > The hypervisor side already has a Reviewed-by.
> >
> > Patch ping.
> 
> Patch ping.
> 

I guess what you really missed is an ack from toolstack maintainer, but anyway:

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

^ permalink raw reply	[flat|nested] 17+ messages in thread

* RE: [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits
  2022-05-20  0:35         ` Tian, Kevin
@ 2022-05-20 14:05           ` Lengyel, Tamas
  0 siblings, 0 replies; 17+ messages in thread
From: Lengyel, Tamas @ 2022-05-20 14:05 UTC (permalink / raw)
  To: Tian, Kevin, Tamas K Lengyel, xen-devel
  Cc: Wei Liu, Anthony PERARD, Gross, Jurgen, Cooper, Andrew,
	George Dunlap, Beulich, Jan, Julien Grall, Stefano Stabellini,
	Alexandru Isaila, Petre Pircalabu, Pau Monné,
	Roger, Nakajima, Jun



> -----Original Message-----
> From: Tian, Kevin <kevin.tian@intel.com>
> Sent: Thursday, May 19, 2022 8:35 PM
> To: Tamas K Lengyel <tamas@tklengyel.com>; xen-
> devel@lists.xenproject.org
> Cc: Lengyel, Tamas <tamas.lengyel@intel.com>; Wei Liu <wl@xen.org>;
> Anthony PERARD <anthony.perard@citrix.com>; Gross, Jurgen
> <jgross@suse.com>; Cooper, Andrew <andrew.cooper3@citrix.com>;
> George Dunlap <george.dunlap@citrix.com>; Beulich, Jan
> <JBeulich@suse.com>; Julien Grall <julien@xen.org>; Stefano Stabellini
> <sstabellini@kernel.org>; Alexandru Isaila <aisaila@bitdefender.com>; Petre
> Pircalabu <ppircalabu@bitdefender.com>; Pau Monné, Roger
> <roger.pau@citrix.com>; Nakajima, Jun <jun.nakajima@intel.com>
> Subject: RE: [PATCH 3/3] x86/monitor: Add new monitor event to catch all
> vmexits
> 
> > From: Tamas K Lengyel <tamas@tklengyel.com>
> > Sent: Wednesday, May 18, 2022 11:02 PM
> >
> > On Thu, May 12, 2022 at 9:47 AM Tamas K Lengyel <tamas@tklengyel.com>
> > wrote:
> > >
> > > On Wed, May 4, 2022 at 9:12 AM Tamas K Lengyel
> <tamas@tklengyel.com>
> > wrote:
> > > >
> > > > On Wed, Apr 27, 2022 at 11:51 AM Tamas K Lengyel
> > > > <tamas.lengyel@intel.com> wrote:
> > > > >
> > > > > Add monitor event that hooks the vmexit handler allowing for
> > > > > both sync
> > and
> > > > > async monitoring of events. With async monitoring an event is
> > > > > placed
> > on the
> > > > > monitor ring for each exit and the rest of the vmexit handler
> > > > > resumes
> > normally.
> > > > > If there are additional monitor events configured those will
> > > > > also place
> > their
> > > > > respective events on the monitor ring.
> > > > >
> > > > > With the sync version an event is placed on the monitor ring but
> > > > > the
> > handler
> > > > > does not get resumed, thus the sync version is only useful when
> > > > > the VM
> > is not
> > > > > expected to resume normally after the vmexit. Our use-case is
> > > > > primarily
> > with
> > > > > the sync version with VM forks where the fork gets reset after
> > > > > sync
> > vmexit
> > > > > event, thus the rest of the vmexit handler can be safely
> > > > > skipped. This is very useful when we want to avoid Xen crashing
> > > > > the VM under any
> > circumstance,
> > > > > for example during fuzzing. Collecting all vmexit information
> > > > > regardless
> > of
> > > > > the root cause makes it easier to reason about the state of the
> > > > > VM on
> > the
> > > > > monitor side, hence we opt to receive all events, even for
> > > > > external
> > interrupt
> > > > > and NMI exits and let the monitor agent decide how to proceed.
> > > > >
> > > > > Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
> > > > > ---
> > > > > v5: wrap vmexit fields in arch.vmx structures in the public
> > > > > vm_event ABI
> > > >
> > > > Patch ping. Could a toolstack maintainer please take a look at this?
> > > > The hypervisor side already has a Reviewed-by.
> > >
> > > Patch ping.
> >
> > Patch ping.
> >
> 
> I guess what you really missed is an ack from toolstack maintainer, but
> anyway:
> 
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>

Thanks, the review is still appreciated!
Tamas

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2022-05-20 14:06 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-27 15:34 [PATCH 1/3] x86/mem_sharing: make fork_reset more configurable Tamas K Lengyel
2022-04-27 15:34 ` [PATCH 2/3] tools/libxc: change xc_memshr_fork_reset API to match hypervisor Tamas K Lengyel
2022-05-04 13:10   ` Tamas K Lengyel
2022-05-05  8:17   ` Roger Pau Monné
2022-05-12 13:46     ` Tamas K Lengyel
2022-05-18 15:01       ` Tamas K Lengyel
2022-05-18 15:48         ` Jan Beulich
2022-05-18 17:03           ` Tamas K Lengyel
2022-05-19  6:03             ` Jan Beulich
2022-04-27 15:34 ` [PATCH 3/3] x86/monitor: Add new monitor event to catch all vmexits Tamas K Lengyel
2022-04-28 13:55   ` Roger Pau Monné
2022-04-28 14:02     ` Tamas K Lengyel
2022-05-04 13:12   ` Tamas K Lengyel
2022-05-12 13:47     ` Tamas K Lengyel
2022-05-18 15:02       ` Tamas K Lengyel
2022-05-20  0:35         ` Tian, Kevin
2022-05-20 14:05           ` Lengyel, Tamas

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.