From: Wei Liu <wei.liu2@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Wei Liu <wei.liu2@citrix.com>, Jan Beulich <JBeulich@suse.com>
Subject: [PATCH for-next v3 18/22] x86/traps: merge x86_64/compat/traps.c into pv/traps.c
Date: Thu, 18 May 2017 18:10:00 +0100 [thread overview]
Message-ID: <20170518171004.27204-19-wei.liu2@citrix.com> (raw)
In-Reply-To: <20170518171004.27204-1-wei.liu2@citrix.com>
Export hypercall_page_initialise_ring1_kernel as the code is moved.
No functional change.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
xen/arch/x86/pv/traps.c | 405 ++++++++++++++++++++++++++++++++++++
xen/arch/x86/x86_64/compat/traps.c | 416 -------------------------------------
xen/arch/x86/x86_64/traps.c | 2 -
xen/include/asm-x86/pv/domain.h | 2 +
4 files changed, 407 insertions(+), 418 deletions(-)
delete mode 100644 xen/arch/x86/x86_64/compat/traps.c
diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 4f52d3e4d3..db92f6d520 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -31,6 +31,9 @@
#include <asm/shared.h>
#include <asm/traps.h>
+#include <compat/callback.h>
+#include <compat/arch-x86_32.h>
+
#include <public/callback.h>
void do_entry_int82(struct cpu_user_regs *regs)
@@ -616,6 +619,408 @@ void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
*(u16 *)(p+ 9) = 0x050f; /* syscall */
}
+/* Compat guest interfaces */
+
+/*
+ * Dump the stack of a 32-bit PV guest vcpu for diagnostics.
+ *
+ * @v: vcpu whose guest stack is to be dumped.
+ * @regs: guest register state supplying the stack pointer (esp).
+ * @debug_stack_lines: number of 8-entry output lines to print.
+ *
+ * If @v is not the current vcpu and no vcpu of its domain has the
+ * currently loaded page tables, the stack page is mapped explicitly via
+ * do_page_walk() and the dump is confined to that single page.
+ */
+void compat_show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs,
+                             int debug_stack_lines)
+{
+    unsigned int i, *stack, addr, mask = STACK_SIZE;
+
+    stack = (unsigned int *)(unsigned long)regs->esp;
+    printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
+
+    /* Bail out early if the stack pointer itself is not guest-accessible. */
+    if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
+    {
+        printk("Guest-inaccessible memory.\n");
+        return;
+    }
+
+    if ( v != current )
+    {
+        struct vcpu *vcpu;
+        unsigned long mfn;
+
+        ASSERT(guest_kernel_mode(v, regs));
+        mfn = read_cr3() >> PAGE_SHIFT;
+        /* Look for a vcpu whose page tables are the ones currently loaded. */
+        for_each_vcpu( v->domain, vcpu )
+            if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn )
+                break;
+        if ( !vcpu )
+        {
+            /* No match: map the single stack page by an explicit walk. */
+            stack = do_page_walk(v, (unsigned long)stack);
+            if ( (unsigned long)stack < PAGE_SIZE )
+            {
+                printk("Inaccessible guest memory.\n");
+                return;
+            }
+            /* Restrict the dump to the one page we just mapped. */
+            mask = PAGE_SIZE;
+        }
+    }
+
+    for ( i = 0; i < debug_stack_lines * 8; i++ )
+    {
+        /* Stop when the next entry would cross the stack/page boundary. */
+        if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask )
+            break;
+        if ( __get_user(addr, stack) )
+        {
+            if ( i != 0 )
+                printk("\n ");
+            printk("Fault while accessing guest memory.");
+            i = 1;
+            break;
+        }
+        /* New output line after every 8 entries. */
+        if ( (i != 0) && ((i % 8) == 0) )
+            printk("\n ");
+        printk(" %08x", addr);
+        stack++;
+    }
+    if ( mask == PAGE_SIZE )
+    {
+        /* mask == PAGE_SIZE implies the do_page_walk() mapping above. */
+        BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
+        unmap_domain_page(stack);
+    }
+    if ( i == 0 )
+        printk("Stack empty.");
+    printk("\n");
+}
+
+/*
+ * Emulate the IRET hypercall for a 32-bit PV guest.
+ *
+ * Pops EAX, EIP, CS and EFLAGS (and, when returning to ring 2/3, ESP and
+ * SS) from the guest stack frame at regs->rsp.  A fault on any guest
+ * memory access crashes the domain and returns 0.
+ *
+ * Returns the value which the hypercall exit path writes back into EAX
+ * (i.e. the guest's restored EAX).
+ */
+unsigned int compat_iret(void)
+{
+    struct cpu_user_regs *regs = guest_cpu_user_regs();
+    struct vcpu *v = current;
+    u32 eflags;
+
+    /* Trim stack pointer to 32 bits. */
+    regs->rsp = (u32)regs->rsp;
+
+    /* Restore EAX (clobbered by hypercall). */
+    if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /* Restore CS and EIP. */
+    if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
+         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /*
+     * Fix up and restore EFLAGS. We fix up in a local staging area
+     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
+     */
+    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /* Virtualised IOPL: stash the guest's requested IOPL separately. */
+    if ( VM_ASSIST(v->domain, architectural_iopl) )
+        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
+
+    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
+
+    if ( unlikely(eflags & X86_EFLAGS_VM) )
+    {
+        /*
+         * Cannot return to VM86 mode: inject a GP fault instead. Note that
+         * the GP fault is reported on the first VM86 mode instruction, not on
+         * the IRET (which is why we can simply leave the stack frame as-is
+         * (except for perhaps having to copy it), which in turn seems better
+         * than teaching create_bounce_frame() to needlessly deal with vm86
+         * mode frames).
+         */
+        const struct trap_info *ti;
+        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
+        unsigned int i;
+        int rc = 0;
+
+        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
+                 regs->esp, ksp);
+        if ( ksp < regs->esp )
+        {
+            /* Destination below source: copy the frame ascending. */
+            for (i = 1; i < 10; ++i)
+            {
+                rc |= __get_user(x, (u32 *)regs->rsp + i);
+                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
+            }
+        }
+        else if ( ksp > regs->esp )
+        {
+            /* Destination above source: copy descending instead. */
+            for ( i = 9; i > 0; --i )
+            {
+                rc |= __get_user(x, (u32 *)regs->rsp + i);
+                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
+            }
+        }
+        if ( rc )
+        {
+            domain_crash(v->domain);
+            return 0;
+        }
+        regs->esp = ksp;
+        regs->ss = v->arch.pv_vcpu.kernel_ss;
+
+        /* Redirect the guest to its registered #GP handler. */
+        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
+        if ( TI_GET_IF(ti) )
+            eflags &= ~X86_EFLAGS_IF;
+        regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
+                          X86_EFLAGS_NT|X86_EFLAGS_TF);
+        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
+        {
+            domain_crash(v->domain);
+            return 0;
+        }
+        regs->eip = ti->address;
+        regs->cs = ti->cs;
+    }
+    else if ( unlikely(ring_0(regs)) )
+    {
+        /* PV guests never run in ring 0: such a frame is fatal. */
+        domain_crash(v->domain);
+        return 0;
+    }
+    else if ( ring_1(regs) )
+        regs->esp += 16;
+    /* Return to ring 2/3: restore ESP and SS. */
+    else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
+              __get_user(regs->esp, (u32 *)regs->rsp + 4) )
+    {
+        domain_crash(v->domain);
+        return 0;
+    }
+
+    /* Restore upcall mask from supplied EFLAGS.IF. */
+    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
+
+    async_exception_cleanup(v);
+
+    /*
+     * The hypercall exit path will overwrite EAX with this return
+     * value.
+     */
+    return regs->eax;
+}
+
+/*
+ * Register one guest callback for a compat (32-bit) PV guest.
+ *
+ * @reg: callback type, flags and 32-bit cs:eip entry point.
+ *
+ * Returns 0 on success, -ENOSYS for an unknown type, or the result of
+ * register_guest_nmi_callback() for CALLBACKTYPE_nmi.
+ */
+static long compat_register_guest_callback(
+    struct compat_callback_register *reg)
+{
+    long ret = 0;
+    struct vcpu *v = current;
+
+    /* Sanitise the guest-supplied code selector before recording it. */
+    fixup_guest_code_selector(v->domain, reg->address.cs);
+
+    switch ( reg->type )
+    {
+    case CALLBACKTYPE_event:
+        v->arch.pv_vcpu.event_callback_cs = reg->address.cs;
+        v->arch.pv_vcpu.event_callback_eip = reg->address.eip;
+        break;
+
+    case CALLBACKTYPE_failsafe:
+        v->arch.pv_vcpu.failsafe_callback_cs = reg->address.cs;
+        v->arch.pv_vcpu.failsafe_callback_eip = reg->address.eip;
+        /* CALLBACKF_mask_events toggles event delivery on failsafe entry. */
+        if ( reg->flags & CALLBACKF_mask_events )
+            set_bit(_VGCF_failsafe_disables_events,
+                    &v->arch.vgc_flags);
+        else
+            clear_bit(_VGCF_failsafe_disables_events,
+                      &v->arch.vgc_flags);
+        break;
+
+    case CALLBACKTYPE_syscall32:
+        v->arch.pv_vcpu.syscall32_callback_cs = reg->address.cs;
+        v->arch.pv_vcpu.syscall32_callback_eip = reg->address.eip;
+        v->arch.pv_vcpu.syscall32_disables_events =
+            (reg->flags & CALLBACKF_mask_events) != 0;
+        break;
+
+    case CALLBACKTYPE_sysenter:
+        v->arch.pv_vcpu.sysenter_callback_cs = reg->address.cs;
+        v->arch.pv_vcpu.sysenter_callback_eip = reg->address.eip;
+        v->arch.pv_vcpu.sysenter_disables_events =
+            (reg->flags & CALLBACKF_mask_events) != 0;
+        break;
+
+    case CALLBACKTYPE_nmi:
+        ret = register_guest_nmi_callback(reg->address.eip);
+        break;
+
+    default:
+        ret = -ENOSYS;
+        break;
+    }
+
+    return ret;
+}
+
+/*
+ * Unregister one guest callback for a compat (32-bit) PV guest.
+ *
+ * Only CALLBACKTYPE_nmi may be unregistered; the event, failsafe,
+ * syscall32 and sysenter callbacks can only be replaced, so requesting
+ * their removal yields -EINVAL.  Unknown types yield -ENOSYS.
+ */
+static long compat_unregister_guest_callback(
+    struct compat_callback_unregister *unreg)
+{
+    long ret;
+
+    switch ( unreg->type )
+    {
+    case CALLBACKTYPE_event:
+    case CALLBACKTYPE_failsafe:
+    case CALLBACKTYPE_syscall32:
+    case CALLBACKTYPE_sysenter:
+        ret = -EINVAL;
+        break;
+
+    case CALLBACKTYPE_nmi:
+        ret = unregister_guest_nmi_callback();
+        break;
+
+    default:
+        ret = -ENOSYS;
+        break;
+    }
+
+    return ret;
+}
+
+/*
+ * Compat (32-bit guest) handler for HYPERVISOR_callback_op.
+ *
+ * @cmd: CALLBACKOP_register or CALLBACKOP_unregister.
+ * @arg: guest handle to the corresponding compat argument structure.
+ *
+ * Returns the result of the inner register/unregister helper, -EFAULT
+ * if the argument structure cannot be copied in, or -EINVAL for an
+ * unknown @cmd.
+ *
+ * NOTE(review): the archive rendering had mangled "&reg" into the HTML
+ * entity glyph "(R)" below; restored to the address-of expression.
+ */
+long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+    long ret;
+
+    switch ( cmd )
+    {
+    case CALLBACKOP_register:
+    {
+        struct compat_callback_register reg;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&reg, arg, 1) )
+            break;
+
+        ret = compat_register_guest_callback(&reg);
+    }
+    break;
+
+    case CALLBACKOP_unregister:
+    {
+        struct compat_callback_unregister unreg;
+
+        ret = -EFAULT;
+        if ( copy_from_guest(&unreg, arg, 1) )
+            break;
+
+        ret = compat_unregister_guest_callback(&unreg);
+    }
+    break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
+}
+
+/*
+ * Compat handler for the legacy HYPERVISOR_set_callbacks hypercall.
+ *
+ * Builds event and failsafe registration requests from the supplied
+ * 32-bit cs:eip pairs and feeds them through
+ * compat_register_guest_callback().  The helper results are ignored
+ * and the hypercall always returns 0.
+ */
+long compat_set_callbacks(unsigned long event_selector,
+                          unsigned long event_address,
+                          unsigned long failsafe_selector,
+                          unsigned long failsafe_address)
+{
+    struct compat_callback_register event = {
+        .type = CALLBACKTYPE_event,
+        .address = {
+            .cs = event_selector,
+            .eip = event_address
+        }
+    };
+    struct compat_callback_register failsafe = {
+        .type = CALLBACKTYPE_failsafe,
+        .address = {
+            .cs = failsafe_selector,
+            .eip = failsafe_address
+        }
+    };
+
+    compat_register_guest_callback(&event);
+    compat_register_guest_callback(&failsafe);
+
+    return 0;
+}
+
+/*
+ * Compat (32-bit guest) handler for HYPERVISOR_set_trap_table.
+ *
+ * @traps: guest handle to a zero-address-terminated array of compat
+ *         trap_info entries; a null handle clears the entire virtual IDT.
+ *
+ * Returns 0 on success, -EFAULT if the guest buffer is inaccessible, or
+ * a continuation encoding when preempted mid-table.
+ */
+int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
+{
+    struct compat_trap_info cur;
+    struct trap_info *dst = current->arch.pv_vcpu.trap_ctxt;
+    long rc = 0;
+
+    /* If no table is presented then clear the entire virtual IDT. */
+    if ( guest_handle_is_null(traps) )
+    {
+        memset(dst, 0, NR_VECTORS * sizeof(*dst));
+        /*
+         * Re-sync the int80 direct trap with the now-empty virtual IDT.
+         * The pre-move x86_64/compat/traps.c code made this call here;
+         * omitting it would contradict "No functional change".
+         */
+        init_int80_direct_trap(current);
+        return 0;
+    }
+
+    for ( ; ; )
+    {
+        if ( copy_from_guest(&cur, traps, 1) )
+        {
+            rc = -EFAULT;
+            break;
+        }
+
+        /* A zero address terminates the guest-supplied table. */
+        if ( cur.address == 0 )
+            break;
+
+        fixup_guest_code_selector(current->domain, cur.cs);
+
+        /* Translate the compat entry into the native trap_ctxt slot. */
+        XLAT_trap_info(dst + cur.vector, &cur);
+
+        /* Vector 0x80 has a fast path which must be kept in sync. */
+        if ( cur.vector == 0x80 )
+            init_int80_direct_trap(current);
+
+        guest_handle_add_offset(traps, 1);
+
+        if ( hypercall_preempt_check() )
+        {
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_set_trap_table, "h", traps);
+            break;
+        }
+    }
+
+    return rc;
+}
+
+/*
+ * Populate the hypercall transfer page for a ring1 (32-bit PV) kernel.
+ *
+ * Each hypercall number gets a 32-byte stub: mov $<nr>,%eax;
+ * int $HYPERCALL_VECTOR; ret.  __HYPERVISOR_iret is laid out specially
+ * (see below) since guests jump to it rather than call it.
+ */
+void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
+{
+    char *p;
+    int i;
+
+    /* Fill in all the transfer points with template machine code. */
+
+    for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+    {
+        if ( i == __HYPERVISOR_iret )
+            continue;
+
+        p = (char *)(hypercall_page + (i * 32));
+        *(u8  *)(p+ 0) = 0xb8;    /* mov  $<i>,%eax */
+        *(u32 *)(p+ 1) = i;
+        *(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
+        *(u8  *)(p+ 7) = 0xc3;    /* ret */
+    }
+
+    /*
+     * HYPERVISOR_iret is special because it doesn't return and expects a
+     * special stack frame. Guests jump at this transfer point instead of
+     * calling it.
+     */
+    p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
+    *(u8  *)(p+ 0) = 0x50;    /* push %eax */
+    *(u8  *)(p+ 1) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
+    *(u32 *)(p+ 2) = __HYPERVISOR_iret;
+    *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/x86_64/compat/traps.c b/xen/arch/x86/x86_64/compat/traps.c
deleted file mode 100644
index 1751ec67e8..0000000000
--- a/xen/arch/x86/x86_64/compat/traps.c
+++ /dev/null
@@ -1,416 +0,0 @@
-#include <xen/event.h>
-#include <asm/regs.h>
-#include <compat/callback.h>
-#include <compat/arch-x86_32.h>
-
-void compat_show_guest_stack(struct vcpu *v, const struct cpu_user_regs *regs,
- int debug_stack_lines)
-{
- unsigned int i, *stack, addr, mask = STACK_SIZE;
-
- stack = (unsigned int *)(unsigned long)regs->esp;
- printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
-
- if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
- {
- printk("Guest-inaccessible memory.\n");
- return;
- }
-
- if ( v != current )
- {
- struct vcpu *vcpu;
- unsigned long mfn;
-
- ASSERT(guest_kernel_mode(v, regs));
- mfn = read_cr3() >> PAGE_SHIFT;
- for_each_vcpu( v->domain, vcpu )
- if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn )
- break;
- if ( !vcpu )
- {
- stack = do_page_walk(v, (unsigned long)stack);
- if ( (unsigned long)stack < PAGE_SIZE )
- {
- printk("Inaccessible guest memory.\n");
- return;
- }
- mask = PAGE_SIZE;
- }
- }
-
- for ( i = 0; i < debug_stack_lines * 8; i++ )
- {
- if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask )
- break;
- if ( __get_user(addr, stack) )
- {
- if ( i != 0 )
- printk("\n ");
- printk("Fault while accessing guest memory.");
- i = 1;
- break;
- }
- if ( (i != 0) && ((i % 8) == 0) )
- printk("\n ");
- printk(" %08x", addr);
- stack++;
- }
- if ( mask == PAGE_SIZE )
- {
- BUILD_BUG_ON(PAGE_SIZE == STACK_SIZE);
- unmap_domain_page(stack);
- }
- if ( i == 0 )
- printk("Stack empty.");
- printk("\n");
-}
-
-unsigned int compat_iret(void)
-{
- struct cpu_user_regs *regs = guest_cpu_user_regs();
- struct vcpu *v = current;
- u32 eflags;
-
- /* Trim stack pointer to 32 bits. */
- regs->rsp = (u32)regs->rsp;
-
- /* Restore EAX (clobbered by hypercall). */
- if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
- {
- domain_crash(v->domain);
- return 0;
- }
-
- /* Restore CS and EIP. */
- if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
- unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
- {
- domain_crash(v->domain);
- return 0;
- }
-
- /*
- * Fix up and restore EFLAGS. We fix up in a local staging area
- * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
- */
- if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
- {
- domain_crash(v->domain);
- return 0;
- }
-
- if ( VM_ASSIST(v->domain, architectural_iopl) )
- v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
-
- regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
-
- if ( unlikely(eflags & X86_EFLAGS_VM) )
- {
- /*
- * Cannot return to VM86 mode: inject a GP fault instead. Note that
- * the GP fault is reported on the first VM86 mode instruction, not on
- * the IRET (which is why we can simply leave the stack frame as-is
- * (except for perhaps having to copy it), which in turn seems better
- * than teaching create_bounce_frame() to needlessly deal with vm86
- * mode frames).
- */
- const struct trap_info *ti;
- u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
- unsigned int i;
- int rc = 0;
-
- gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
- regs->esp, ksp);
- if ( ksp < regs->esp )
- {
- for (i = 1; i < 10; ++i)
- {
- rc |= __get_user(x, (u32 *)regs->rsp + i);
- rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
- }
- }
- else if ( ksp > regs->esp )
- {
- for ( i = 9; i > 0; --i )
- {
- rc |= __get_user(x, (u32 *)regs->rsp + i);
- rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
- }
- }
- if ( rc )
- {
- domain_crash(v->domain);
- return 0;
- }
- regs->esp = ksp;
- regs->ss = v->arch.pv_vcpu.kernel_ss;
-
- ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
- if ( TI_GET_IF(ti) )
- eflags &= ~X86_EFLAGS_IF;
- regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
- X86_EFLAGS_NT|X86_EFLAGS_TF);
- if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
- {
- domain_crash(v->domain);
- return 0;
- }
- regs->eip = ti->address;
- regs->cs = ti->cs;
- }
- else if ( unlikely(ring_0(regs)) )
- {
- domain_crash(v->domain);
- return 0;
- }
- else if ( ring_1(regs) )
- regs->esp += 16;
- /* Return to ring 2/3: restore ESP and SS. */
- else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
- __get_user(regs->esp, (u32 *)regs->rsp + 4) )
- {
- domain_crash(v->domain);
- return 0;
- }
-
- /* Restore upcall mask from supplied EFLAGS.IF. */
- vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
-
- async_exception_cleanup(v);
-
- /*
- * The hypercall exit path will overwrite EAX with this return
- * value.
- */
- return regs->eax;
-}
-
-static long compat_register_guest_callback(
- struct compat_callback_register *reg)
-{
- long ret = 0;
- struct vcpu *v = current;
-
- fixup_guest_code_selector(v->domain, reg->address.cs);
-
- switch ( reg->type )
- {
- case CALLBACKTYPE_event:
- v->arch.pv_vcpu.event_callback_cs = reg->address.cs;
- v->arch.pv_vcpu.event_callback_eip = reg->address.eip;
- break;
-
- case CALLBACKTYPE_failsafe:
- v->arch.pv_vcpu.failsafe_callback_cs = reg->address.cs;
- v->arch.pv_vcpu.failsafe_callback_eip = reg->address.eip;
- if ( reg->flags & CALLBACKF_mask_events )
- set_bit(_VGCF_failsafe_disables_events,
- &v->arch.vgc_flags);
- else
- clear_bit(_VGCF_failsafe_disables_events,
- &v->arch.vgc_flags);
- break;
-
- case CALLBACKTYPE_syscall32:
- v->arch.pv_vcpu.syscall32_callback_cs = reg->address.cs;
- v->arch.pv_vcpu.syscall32_callback_eip = reg->address.eip;
- v->arch.pv_vcpu.syscall32_disables_events =
- (reg->flags & CALLBACKF_mask_events) != 0;
- break;
-
- case CALLBACKTYPE_sysenter:
- v->arch.pv_vcpu.sysenter_callback_cs = reg->address.cs;
- v->arch.pv_vcpu.sysenter_callback_eip = reg->address.eip;
- v->arch.pv_vcpu.sysenter_disables_events =
- (reg->flags & CALLBACKF_mask_events) != 0;
- break;
-
- case CALLBACKTYPE_nmi:
- ret = register_guest_nmi_callback(reg->address.eip);
- break;
-
- default:
- ret = -ENOSYS;
- break;
- }
-
- return ret;
-}
-
-static long compat_unregister_guest_callback(
- struct compat_callback_unregister *unreg)
-{
- long ret;
-
- switch ( unreg->type )
- {
- case CALLBACKTYPE_event:
- case CALLBACKTYPE_failsafe:
- case CALLBACKTYPE_syscall32:
- case CALLBACKTYPE_sysenter:
- ret = -EINVAL;
- break;
-
- case CALLBACKTYPE_nmi:
- ret = unregister_guest_nmi_callback();
- break;
-
- default:
- ret = -ENOSYS;
- break;
- }
-
- return ret;
-}
-
-
-long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
-{
- long ret;
-
- switch ( cmd )
- {
- case CALLBACKOP_register:
- {
- struct compat_callback_register reg;
-
- ret = -EFAULT;
-        if ( copy_from_guest(&reg, arg, 1) )
- break;
-
-        ret = compat_register_guest_callback(&reg);
- }
- break;
-
- case CALLBACKOP_unregister:
- {
- struct compat_callback_unregister unreg;
-
- ret = -EFAULT;
- if ( copy_from_guest(&unreg, arg, 1) )
- break;
-
- ret = compat_unregister_guest_callback(&unreg);
- }
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-long compat_set_callbacks(unsigned long event_selector,
- unsigned long event_address,
- unsigned long failsafe_selector,
- unsigned long failsafe_address)
-{
- struct compat_callback_register event = {
- .type = CALLBACKTYPE_event,
- .address = {
- .cs = event_selector,
- .eip = event_address
- }
- };
- struct compat_callback_register failsafe = {
- .type = CALLBACKTYPE_failsafe,
- .address = {
- .cs = failsafe_selector,
- .eip = failsafe_address
- }
- };
-
- compat_register_guest_callback(&event);
- compat_register_guest_callback(&failsafe);
-
- return 0;
-}
-
-int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
-{
- struct compat_trap_info cur;
- struct trap_info *dst = current->arch.pv_vcpu.trap_ctxt;
- long rc = 0;
-
- /* If no table is presented then clear the entire virtual IDT. */
- if ( guest_handle_is_null(traps) )
- {
- memset(dst, 0, NR_VECTORS * sizeof(*dst));
- init_int80_direct_trap(current);
- return 0;
- }
-
- for ( ; ; )
- {
- if ( copy_from_guest(&cur, traps, 1) )
- {
- rc = -EFAULT;
- break;
- }
-
- if ( cur.address == 0 )
- break;
-
- fixup_guest_code_selector(current->domain, cur.cs);
-
- XLAT_trap_info(dst + cur.vector, &cur);
-
- if ( cur.vector == 0x80 )
- init_int80_direct_trap(current);
-
- guest_handle_add_offset(traps, 1);
-
- if ( hypercall_preempt_check() )
- {
- rc = hypercall_create_continuation(
- __HYPERVISOR_set_trap_table, "h", traps);
- break;
- }
- }
-
- return rc;
-}
-
-static void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
-{
- char *p;
- int i;
-
- /* Fill in all the transfer points with template machine code. */
-
- for ( i = 0; i < (PAGE_SIZE / 32); i++ )
- {
- if ( i == __HYPERVISOR_iret )
- continue;
-
- p = (char *)(hypercall_page + (i * 32));
- *(u8 *)(p+ 0) = 0xb8; /* mov $<i>,%eax */
- *(u32 *)(p+ 1) = i;
- *(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */
- *(u8 *)(p+ 7) = 0xc3; /* ret */
- }
-
- /*
- * HYPERVISOR_iret is special because it doesn't return and expects a
- * special stack frame. Guests jump at this transfer point instead of
- * calling it.
- */
- p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
- *(u8 *)(p+ 0) = 0x50; /* push %eax */
- *(u8 *)(p+ 1) = 0xb8; /* mov $__HYPERVISOR_iret,%eax */
- *(u32 *)(p+ 2) = __HYPERVISOR_iret;
- *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int $xx */
-}
-
-/*
- * Local variables:
- * mode: C
- * c-file-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 7a4dd4458e..deca2ca1f6 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -335,8 +335,6 @@ void subarch_percpu_traps_init(void)
wrmsrl(MSR_SYSCALL_MASK, XEN_SYSCALL_MASK);
}
-#include "compat/traps.c"
-
void hypercall_page_initialise(struct domain *d, void *hypercall_page)
{
memset(hypercall_page, 0xCC, PAGE_SIZE);
diff --git a/xen/include/asm-x86/pv/domain.h b/xen/include/asm-x86/pv/domain.h
index dfa60b080c..67e370ebf3 100644
--- a/xen/include/asm-x86/pv/domain.h
+++ b/xen/include/asm-x86/pv/domain.h
@@ -30,6 +30,7 @@ int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
struct xen_arch_domainconfig *config);
void hypercall_page_initialise_ring3_kernel(void *hypercall_page);
+void hypercall_page_initialise_ring1_kernel(void *hypercall_page);
#else /* !CONFIG_PV */
@@ -46,6 +47,7 @@ static inline int pv_domain_initialise(struct domain *d,
}
void hypercall_page_initialise_ring3_kernel(void *hypercall_page) {}
+void hypercall_page_initialise_ring1_kernel(void *hypercall_page) {}
#endif /* CONFIG_PV */
--
2.11.0
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2017-05-18 17:16 UTC|newest]
Thread overview: 65+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-05-18 17:09 [PATCH for-next v3 00/22] x86: refactor trap handling code Wei Liu
2017-05-18 17:09 ` [PATCH for-next v3 01/22] x86/traps: move privilege instruction emulation code Wei Liu
2017-05-18 17:28 ` Wei Liu
2017-05-29 15:14 ` Jan Beulich
2017-05-30 17:27 ` Wei Liu
2017-05-30 17:30 ` Andrew Cooper
2017-05-31 5:55 ` Jan Beulich
2017-05-31 11:01 ` Wei Liu
2017-05-31 11:05 ` Andrew Cooper
2017-05-31 11:36 ` Wei Liu
2017-05-31 11:43 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 02/22] x86/traps: move gate op " Wei Liu
2017-05-29 15:15 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 03/22] x86/traps: move emulate_invalid_rdtscp Wei Liu
2017-05-29 15:18 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 04/22] x86/traps: move emulate_forced_invalid_op Wei Liu
2017-05-29 15:19 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 05/22] x86/pv: clean up emulate.c Wei Liu
2017-05-29 15:37 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 06/22] x86/traps: move PV hypercall handlers to pv/traps.c Wei Liu
2017-05-29 15:40 ` Jan Beulich
2017-05-30 17:40 ` Andrew Cooper
2017-05-31 5:59 ` Jan Beulich
2017-05-31 11:14 ` Wei Liu
2017-05-31 11:45 ` Jan Beulich
2017-06-02 11:01 ` Wei Liu
2017-06-06 7:36 ` Jan Beulich
2017-06-08 11:30 ` Andrew Cooper
2017-06-08 14:28 ` Wei Liu
2017-05-18 17:09 ` [PATCH for-next v3 07/22] x86/traps: move pv_inject_event " Wei Liu
2017-05-29 15:42 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 08/22] x86/traps: move set_guest_{machinecheck, nmi}_trapbounce Wei Liu
2017-05-29 15:43 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 09/22] x86/traps: move {un, }register_guest_nmi_callback Wei Liu
2017-05-18 17:09 ` [PATCH for-next v3 10/22] x86/traps: delcare percpu softirq_trap Wei Liu
2017-05-29 15:49 ` Jan Beulich
2017-05-31 11:35 ` Wei Liu
2017-05-31 11:46 ` Jan Beulich
2017-05-31 11:54 ` Wei Liu
2017-05-18 17:09 ` [PATCH for-next v3 11/22] x86/traps: move guest_has_trap_callback to pv/traps.c Wei Liu
2017-05-29 15:54 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 12/22] x86/traps: move send_guest_trap " Wei Liu
2017-05-29 15:55 ` Jan Beulich
2017-06-05 17:08 ` Wei Liu
2017-06-06 7:37 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 13/22] x86/traps: move toggle_guest_mode Wei Liu
2017-05-29 16:05 ` Jan Beulich
2017-05-30 17:47 ` Andrew Cooper
2017-05-31 6:00 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 14/22] x86/traps: move do_iret to pv/traps.c Wei Liu
2017-05-29 16:07 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 15/22] x86/traps: move init_int80_direct_trap Wei Liu
2017-05-29 16:07 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 16/22] x86/traps: move callback_op code Wei Liu
2017-05-29 16:09 ` Jan Beulich
2017-05-18 17:09 ` [PATCH for-next v3 17/22] x86/traps: move hypercall_page_initialise_ring3_kernel Wei Liu
2017-05-29 16:10 ` Jan Beulich
2017-05-18 17:10 ` Wei Liu [this message]
2017-05-29 16:12 ` [PATCH for-next v3 18/22] x86/traps: merge x86_64/compat/traps.c into pv/traps.c Jan Beulich
2017-05-18 17:10 ` [PATCH for-next v3 19/22] x86: clean up pv/traps.c Wei Liu
2017-05-29 16:18 ` Jan Beulich
2017-05-18 17:10 ` [PATCH for-next v3 20/22] x86: guest_has_trap_callback should return bool Wei Liu
2017-05-18 17:10 ` [PATCH for-next v3 21/22] x86: fix coding style issues in asm-x86/traps.h Wei Liu
2017-05-18 17:10 ` [PATCH for-next v3 22/22] x86: clean up traps.c Wei Liu
2017-05-29 16:21 ` Jan Beulich
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20170518171004.27204-19-wei.liu2@citrix.com \
--to=wei.liu2@citrix.com \
--cc=JBeulich@suse.com \
--cc=andrew.cooper3@citrix.com \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).