* [PATCH V7] x86/vm_event: Added support for VM_EVENT_REASON_INTERRUPT
@ 2016-12-14 10:00 Razvan Cojocaru
0 siblings, 0 replies; only message in thread
From: Razvan Cojocaru @ 2016-12-14 10:00 UTC (permalink / raw)
To: xen-devel
Cc: kevin.tian, tamas, suravee.suthikulpanit, Razvan Cojocaru,
andrew.cooper3, julien.grall, jbeulich, sstabellini,
jun.nakajima, boris.ostrovsky
Added support for a new event type, VM_EVENT_REASON_INTERRUPT,
which is now fired in a one-shot manner when enabled via the new
VM_EVENT_FLAG_GET_NEXT_INTERRUPT vm_event response flag.
The patch also fixes the behaviour of the xc_hvm_inject_trap()
hypercall, which would lead to non-architectural interrupts
overwriting pending (specifically reinjected) architectural ones.
Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Acked-by: Julien Grall <julien.grall@arm.com>
---
Changes since V6:
- Rebased on staging.
- Fixed ARM build (just moved vm_event_monitor_next_interrupt()
from vm_event.h, where it was inlined, to vm_event.c).
---
xen/arch/arm/vm_event.c | 5 +++++
xen/arch/x86/hvm/hvm.c | 22 +++++++++++++++++++++-
xen/arch/x86/hvm/monitor.c | 14 ++++++++++++++
xen/arch/x86/hvm/svm/svm.c | 15 +++++++++++++++
xen/arch/x86/hvm/vmx/vmx.c | 20 ++++++++++++++++++++
xen/arch/x86/vm_event.c | 5 +++++
xen/common/vm_event.c | 3 +++
xen/include/asm-x86/domain.h | 4 ++++
xen/include/asm-x86/hvm/hvm.h | 1 +
xen/include/asm-x86/hvm/monitor.h | 2 ++
xen/include/asm-x86/monitor.h | 3 ++-
xen/include/public/domctl.h | 1 +
xen/include/public/vm_event.h | 18 ++++++++++++++++++
xen/include/xen/vm_event.h | 2 ++
14 files changed, 113 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/vm_event.c b/xen/arch/arm/vm_event.c
index 47312e9..eaac920 100644
--- a/xen/arch/arm/vm_event.c
+++ b/xen/arch/arm/vm_event.c
@@ -42,6 +42,11 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
regs->pc = rsp->data.regs.arm.pc;
}
+/* No-op stub: one-shot interrupt monitoring is x86-only. */
+void vm_event_monitor_next_interrupt(struct vcpu *v)
+{
+ /* Not supported on ARM; the request flag is silently ignored. */
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 61f5029..115c86e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -469,6 +469,12 @@ void hvm_migrate_pirqs(struct vcpu *v)
spin_unlock(&d->event_lock);
}
+/*
+ * Fetch the event (if any) pending injection into @v.
+ * Returns true and fills @info when an event is pending, false otherwise.
+ */
+static bool hvm_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
+ /* CR2 lives in common HVM state; vendor code fills vector/type/error. */
+ info->cr2 = v->arch.hvm_vcpu.guest_cr[2];
+ return hvm_funcs.get_pending_event(v, info);
+}
+
void hvm_do_resume(struct vcpu *v)
{
check_wakeup_from_wait();
@@ -535,9 +541,23 @@ void hvm_do_resume(struct vcpu *v)
/* Inject pending hw/sw trap */
if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
{
- hvm_inject_event(&v->arch.hvm_vcpu.inject_trap);
+ if ( !hvm_event_pending(v) )
+ hvm_inject_event(&v->arch.hvm_vcpu.inject_trap);
+
v->arch.hvm_vcpu.inject_trap.vector = -1;
}
+
+ if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
+ {
+ struct x86_event info;
+
+ if ( hvm_get_pending_event(v, &info) )
+ {
+ hvm_monitor_interrupt(info.vector, info.type, info.error_code,
+ info.cr2);
+ v->arch.monitor.next_interrupt_enabled = false;
+ }
+ }
}
static int hvm_print_line(
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index 401a8c6..69a88ad 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -150,6 +150,20 @@ int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
return monitor_traps(curr, 1, &req);
}
+/* Send a VM_EVENT_REASON_INTERRUPT request describing a pending event. */
+void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
+ unsigned int err, uint64_t cr2)
+{
+ vm_event_request_t req = {
+ .reason = VM_EVENT_REASON_INTERRUPT,
+ .u.interrupt.x86.vector = vector,
+ .u.interrupt.x86.type = type,
+ .u.interrupt.x86.error_code = err,
+ .u.interrupt.x86.cr2 = cr2,
+ };
+
+ monitor_traps(current, 1, &req);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index bb8273b..9dda51b 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2179,6 +2179,20 @@ static void svm_invlpg(struct vcpu *v, unsigned long vaddr)
svm_asid_g_invlpg(v, vaddr);
}
+/*
+ * Report the event pending injection via the VMCB, if any.
+ * Returns true and fills @info when EVENTINJ holds a valid event.
+ */
+static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
+ const struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+ /*
+ * Valid bit clear => nothing pending. (The V6 check was inverted:
+ * it bailed out precisely when an event WAS pending, the opposite
+ * of the VMX implementation and of what the caller expects.)
+ */
+ if ( !vmcb->eventinj.fields.v )
+ return false;
+
+ info->vector = vmcb->eventinj.fields.vector;
+ info->type = vmcb->eventinj.fields.type;
+ info->error_code = vmcb->eventinj.fields.errorcode;
+
+ return true;
+}
+
static struct hvm_function_table __initdata svm_function_table = {
.name = "SVM",
.cpu_up_prepare = svm_cpu_up_prepare,
@@ -2209,6 +2223,7 @@ static struct hvm_function_table __initdata svm_function_table = {
.inject_event = svm_inject_event,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
+ .get_pending_event = svm_get_pending_event,
.invlpg = svm_invlpg,
.wbinvd_intercept = svm_wbinvd_intercept,
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 350b945..13770dd 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2052,6 +2052,25 @@ static int vmx_set_mode(struct vcpu *v, int mode)
return 0;
}
+/*
+ * Report the event pending injection via the VMCS, if any.
+ * Returns true and fills @info when the interruption-information field
+ * has its valid bit set.
+ */
+static bool vmx_get_pending_event(struct vcpu *v, struct x86_event *info)
+{
+ unsigned long intr_info, error_code;
+
+ /* Read the VM-entry event-injection fields from this vCPU's VMCS. */
+ vmx_vmcs_enter(v);
+ __vmread(VM_ENTRY_INTR_INFO, &intr_info);
+ __vmread(VM_ENTRY_EXCEPTION_ERROR_CODE, &error_code);
+ vmx_vmcs_exit(v);
+
+ /* Valid bit clear => no event pending. */
+ if ( !(intr_info & INTR_INFO_VALID_MASK) )
+ return false;
+
+ info->vector = MASK_EXTR(intr_info, INTR_INFO_VECTOR_MASK);
+ info->type = MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK);
+ info->error_code = error_code;
+
+ return true;
+}
+
static struct hvm_function_table __initdata vmx_function_table = {
.name = "VMX",
.cpu_up_prepare = vmx_cpu_up_prepare,
@@ -2081,6 +2100,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
.inject_event = vmx_inject_event,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
+ .get_pending_event = vmx_get_pending_event,
.invlpg = vmx_invlpg,
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 1e88d67..e9a689c 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -134,6 +134,11 @@ void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
v->arch.user_regs.eip = rsp->data.regs.x86.rip;
}
+/* Arm the one-shot interrupt monitor; cleared again in hvm_do_resume()
+ * once the event for the next pending interrupt has been sent. */
+void vm_event_monitor_next_interrupt(struct vcpu *v)
+{
+ v->arch.monitor.next_interrupt_enabled = true;
+}
+
void vm_event_fill_regs(vm_event_request_t *req)
{
const struct cpu_user_regs *regs = guest_cpu_user_regs();
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index c6f7d32..82ce8f1 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -433,6 +433,9 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
vm_event_set_registers(v, &rsp);
+ if ( rsp.flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
+ vm_event_monitor_next_interrupt(v);
+
if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
vm_event_vcpu_unpause(v);
}
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 39cc658..95762cf 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -577,6 +577,10 @@ struct arch_vcpu
XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
struct arch_vm_event *vm_event;
+
+ struct {
+ bool next_interrupt_enabled;
+ } monitor;
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index b89b209..9334f17 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -149,6 +149,7 @@ struct hvm_function_table {
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*event_pending)(struct vcpu *v);
+ bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
void (*invlpg)(struct vcpu *v, unsigned long vaddr);
int (*cpu_up_prepare)(unsigned int cpu);
diff --git a/xen/include/asm-x86/hvm/monitor.h b/xen/include/asm-x86/hvm/monitor.h
index 82b85ec..85ca678 100644
--- a/xen/include/asm-x86/hvm/monitor.h
+++ b/xen/include/asm-x86/hvm/monitor.h
@@ -42,6 +42,8 @@ int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
unsigned long trap_type, unsigned long insn_length);
int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
unsigned int subleaf);
+void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
+ unsigned int err, uint64_t cr2);
#endif /* __ASM_X86_HVM_MONITOR_H__ */
diff --git a/xen/include/asm-x86/monitor.h b/xen/include/asm-x86/monitor.h
index 63a994b..e409373 100644
--- a/xen/include/asm-x86/monitor.h
+++ b/xen/include/asm-x86/monitor.h
@@ -76,7 +76,8 @@ static inline uint32_t arch_monitor_get_capabilities(struct domain *d)
(1U << XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT) |
(1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST) |
(1U << XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION) |
- (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID);
+ (1U << XEN_DOMCTL_MONITOR_EVENT_CPUID) |
+ (1U << XEN_DOMCTL_MONITOR_EVENT_INTERRUPT);
/* Since we know this is on VMX, we can just call the hvm func */
if ( hvm_is_singlestep_supported() )
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 177319d..85cbb7c 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1086,6 +1086,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
#define XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION 5
#define XEN_DOMCTL_MONITOR_EVENT_CPUID 6
#define XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL 7
+#define XEN_DOMCTL_MONITOR_EVENT_INTERRUPT 8
struct xen_domctl_monitor_op {
uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index c28be5a..b7487a1 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -105,6 +105,11 @@
* if any of those flags are set, only those will be honored).
*/
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
+/*
+ * Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
+ * interrupt pending after resuming the VCPU.
+ */
+#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
/*
* Reasons for the vm event request
@@ -139,6 +144,8 @@
* These kinds of events will be filtered out in future versions.
*/
#define VM_EVENT_REASON_PRIVILEGED_CALL 11
+/* An interrupt has been delivered. */
+#define VM_EVENT_REASON_INTERRUPT 12
/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0
@@ -259,6 +266,14 @@ struct vm_event_cpuid {
uint32_t _pad;
};
+struct vm_event_interrupt_x86 {
+ uint32_t vector;
+ uint32_t type;
+ uint32_t error_code;
+ uint32_t _pad;
+ uint64_t cr2;
+};
+
#define MEM_PAGING_DROP_PAGE (1 << 0)
#define MEM_PAGING_EVICT_FAIL (1 << 1)
@@ -302,6 +317,9 @@ typedef struct vm_event_st {
struct vm_event_debug software_breakpoint;
struct vm_event_debug debug_exception;
struct vm_event_cpuid cpuid;
+ union {
+ struct vm_event_interrupt_x86 x86;
+ } interrupt;
} u;
union {
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 4f088c8..2fb3951 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -78,6 +78,8 @@ void vm_event_vcpu_unpause(struct vcpu *v);
void vm_event_fill_regs(vm_event_request_t *req);
void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
+void vm_event_monitor_next_interrupt(struct vcpu *v);
+
#endif /* __VM_EVENT_H__ */
/*
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] only message in thread
only message in thread, other threads:[~2016-12-14 10:00 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-12-14 10:00 [PATCH V7] x86/vm_event: Added support for VM_EVENT_REASON_INTERRUPT Razvan Cojocaru
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.