* [PATCH v2 1/3] x86: don't build unused entry code when !PV32
2021-04-06 13:58 [PATCH v2 0/3] x86: asm-offsets.h and !PV32 adjustments Jan Beulich
@ 2021-04-06 14:01 ` Jan Beulich
2021-04-06 16:56 ` Wei Liu
2021-04-06 14:01 ` [PATCH v2 2/3] x86: slim down hypercall handling " Jan Beulich
2021-04-06 14:02 ` [PATCH v2 3/3] x86: avoid building COMPAT code when !HVM && !PV32 Jan Beulich
2 siblings, 1 reply; 8+ messages in thread
From: Jan Beulich @ 2021-04-06 14:01 UTC (permalink / raw)
To: xen-devel; +Cc: Andrew Cooper, George Dunlap, Wei Liu, Roger Pau Monné
Except for the initial part of cstar_enter, compat/entry.S is all dead
code in this case. Further, along the lines of the PV conditionals we
already have in entry.S, make code PV32-conditional there too (to a
fair part because this code actually references compat/entry.S).
This has the side effect of moving the tail part (now at compat_syscall)
of the code out of .text.entry (in line with e.g. compat_sysenter).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Avoid #ifdef-ary in compat/entry.S.
---
TBD: I'm on the fence about whether (in a separate patch) to also make
struct pv_domain's is_32bit field conditional.
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -9,7 +9,7 @@
#include <xen/perfc.h>
#endif
#include <xen/sched.h>
-#ifdef CONFIG_PV
+#ifdef CONFIG_PV32
#include <compat/xen.h>
#endif
#include <asm/hardirq.h>
@@ -102,19 +102,21 @@ void __dummy__(void)
BLANK();
#endif
-#ifdef CONFIG_PV
+#ifdef CONFIG_PV32
OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.pv.is_32bit);
BLANK();
- OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
- OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
- BLANK();
-
OFFSET(COMPAT_VCPUINFO_upcall_pending, struct compat_vcpu_info, evtchn_upcall_pending);
OFFSET(COMPAT_VCPUINFO_upcall_mask, struct compat_vcpu_info, evtchn_upcall_mask);
BLANK();
#endif
+#ifdef CONFIG_PV
+ OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
+ OFFSET(VCPUINFO_upcall_mask, struct vcpu_info, evtchn_upcall_mask);
+ BLANK();
+#endif
+
OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -11,8 +11,6 @@
#include <public/xen.h>
#include <irq_vectors.h>
-#ifdef CONFIG_PV32
-
ENTRY(entry_int82)
ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
pushq $0
@@ -29,8 +27,6 @@ ENTRY(entry_int82)
mov %rsp, %rdi
call do_entry_int82
-#endif /* CONFIG_PV32 */
-
/* %rbx: struct vcpu */
ENTRY(compat_test_all_events)
ASSERT_NOT_IN_ATOMIC
@@ -197,43 +193,7 @@ ENTRY(cr4_pv32_restore)
xor %eax, %eax
ret
- .section .text.entry, "ax", @progbits
-
-/* See lstar_enter for entry register state. */
-ENTRY(cstar_enter)
-#ifdef CONFIG_XEN_SHSTK
- ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
-#endif
- /* sti could live here when we don't switch page tables below. */
- CR4_PV32_RESTORE
- movq 8(%rsp),%rax /* Restore %rax. */
- movq $FLAT_USER_SS32, 8(%rsp) /* Assume a 64bit domain. Compat handled lower. */
- pushq %r11
- pushq $FLAT_USER_CS32
- pushq %rcx
- pushq $0
- movl $TRAP_syscall, 4(%rsp)
- SAVE_ALL
-
- SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
- /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
-
- GET_STACK_END(bx)
- mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
- test %rcx, %rcx
- jz .Lcstar_cr3_okay
- movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
- mov %rcx, %cr3
- /* %r12 is still zero at this point. */
- mov %r12, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-.Lcstar_cr3_okay:
- sti
-
- movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
- movq VCPU_domain(%rbx),%rcx
- cmpb $0,DOMAIN_is_32bit_pv(%rcx)
- je switch_to_kernel
-
+ENTRY(compat_syscall)
/* Fix up reported %cs/%ss for compat domains. */
movl $FLAT_COMPAT_USER_SS, UREGS_ss(%rsp)
movl $FLAT_COMPAT_USER_CS, UREGS_cs(%rsp)
@@ -262,8 +222,6 @@ UNLIKELY_END(compat_syscall_gpf)
movb %cl,TRAPBOUNCE_flags(%rdx)
jmp .Lcompat_bounce_exception
- .text
-
ENTRY(compat_sysenter)
CR4_PV32_RESTORE
movq VCPU_trap_ctxt(%rbx),%rcx
--- a/xen/arch/x86/x86_64/Makefile
+++ b/xen/arch/x86/x86_64/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PV) += compat/
+obj-$(CONFIG_PV32) += compat/
obj-bin-y += entry.o
obj-y += traps.o
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -24,7 +24,7 @@
#ifdef CONFIG_PV
/* %rbx: struct vcpu */
-ENTRY(switch_to_kernel)
+switch_to_kernel:
leaq VCPU_trap_bounce(%rbx),%rdx
/* TB_eip = 32-bit syscall ? syscall32_addr : syscall_addr */
@@ -283,6 +283,45 @@ ENTRY(lstar_enter)
call pv_hypercall
jmp test_all_events
+/* See lstar_enter for entry register state. */
+ENTRY(cstar_enter)
+#ifdef CONFIG_XEN_SHSTK
+ ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
+#endif
+ /* sti could live here when we don't switch page tables below. */
+ CR4_PV32_RESTORE
+ movq 8(%rsp), %rax /* Restore %rax. */
+ movq $FLAT_USER_SS32, 8(%rsp) /* Assume a 64bit domain. Compat handled lower. */
+ pushq %r11
+ pushq $FLAT_USER_CS32
+ pushq %rcx
+ pushq $0
+ movl $TRAP_syscall, 4(%rsp)
+ SAVE_ALL
+
+ SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, Clob: acd */
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
+
+ GET_STACK_END(bx)
+ mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
+ test %rcx, %rcx
+ jz .Lcstar_cr3_okay
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
+ mov %rcx, %cr3
+ /* %r12 is still zero at this point. */
+ mov %r12, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
+.Lcstar_cr3_okay:
+ sti
+
+ movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
+
+#ifdef CONFIG_PV32
+ movq VCPU_domain(%rbx), %rcx
+ cmpb $0, DOMAIN_is_32bit_pv(%rcx)
+ jne compat_syscall
+#endif
+ jmp switch_to_kernel
+
ENTRY(sysenter_entry)
#ifdef CONFIG_XEN_SHSTK
ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
@@ -340,8 +379,10 @@ UNLIKELY_END(sysenter_gpf)
movq VCPU_domain(%rbx),%rdi
movq %rax,TRAPBOUNCE_eip(%rdx)
movb %cl,TRAPBOUNCE_flags(%rdx)
+#ifdef CONFIG_PV32
cmpb $0, DOMAIN_is_32bit_pv(%rdi)
jne compat_sysenter
+#endif
jmp .Lbounce_exception
ENTRY(int80_direct_trap)
@@ -382,6 +423,7 @@ UNLIKELY_END(msi_check)
mov 0x80 * TRAPINFO_sizeof + TRAPINFO_eip(%rsi), %rdi
movzwl 0x80 * TRAPINFO_sizeof + TRAPINFO_cs (%rsi), %ecx
+#ifdef CONFIG_PV32
mov %ecx, %edx
and $~3, %edx
@@ -390,6 +432,10 @@ UNLIKELY_END(msi_check)
test %rdx, %rdx
jz int80_slow_path
+#else
+ test %rdi, %rdi
+ jz int80_slow_path
+#endif
/* Construct trap_bounce from trap_ctxt[0x80]. */
lea VCPU_trap_bounce(%rbx), %rdx
@@ -402,8 +448,10 @@ UNLIKELY_END(msi_check)
lea (, %rcx, TBF_INTERRUPT), %ecx
mov %cl, TRAPBOUNCE_flags(%rdx)
+#ifdef CONFIG_PV32
cmpb $0, DOMAIN_is_32bit_pv(%rax)
jne compat_int80_direct_trap
+#endif
call create_bounce_frame
jmp test_all_events
@@ -555,12 +603,16 @@ ENTRY(dom_crash_sync_extable)
GET_STACK_END(ax)
leaq STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
# create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
+#ifdef CONFIG_PV32
movq STACK_CPUINFO_FIELD(current_vcpu)(%rax), %rax
movq VCPU_domain(%rax),%rax
cmpb $0, DOMAIN_is_32bit_pv(%rax)
sete %al
leal (%rax,%rax,2),%eax
orb %al,UREGS_cs(%rsp)
+#else
+ orb $3, UREGS_cs(%rsp)
+#endif
xorl %edi,%edi
jmp asm_domain_crash_synchronous /* Does not return */
.popsection
@@ -578,11 +630,15 @@ ret_from_intr:
GET_CURRENT(bx)
testb $3, UREGS_cs(%rsp)
jz restore_all_xen
+#ifdef CONFIG_PV32
movq VCPU_domain(%rbx), %rax
cmpb $0, DOMAIN_is_32bit_pv(%rax)
je test_all_events
jmp compat_test_all_events
#else
+ jmp test_all_events
+#endif
+#else
ret_from_intr:
ASSERT_CONTEXT_IS_XEN
jmp restore_all_xen
@@ -671,7 +727,7 @@ handle_exception_saved:
testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
jz exception_with_ints_disabled
-#ifdef CONFIG_PV
+#if defined(CONFIG_PV32)
ALTERNATIVE_2 "jmp .Lcr4_pv32_done", \
__stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMEP, \
__stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMAP
@@ -711,7 +767,7 @@ handle_exception_saved:
test $~(PFEC_write_access|PFEC_insn_fetch),%eax
jz compat_test_all_events
.Lcr4_pv32_done:
-#else
+#elif !defined(CONFIG_PV)
ASSERT_CONTEXT_IS_XEN
#endif /* CONFIG_PV */
sti
@@ -730,9 +786,11 @@ handle_exception_saved:
#ifdef CONFIG_PV
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
+#ifdef CONFIG_PV32
movq VCPU_domain(%rbx),%rax
cmpb $0, DOMAIN_is_32bit_pv(%rax)
jne compat_test_all_events
+#endif
jmp test_all_events
#else
ASSERT_CONTEXT_IS_XEN
@@ -968,11 +1026,16 @@ handle_ist_exception:
je 1f
movl $EVENT_CHECK_VECTOR,%edi
call send_IPI_self
-1: movq VCPU_domain(%rbx),%rax
+1:
+#ifdef CONFIG_PV32
+ movq VCPU_domain(%rbx),%rax
cmpb $0,DOMAIN_is_32bit_pv(%rax)
je restore_all_guest
jmp compat_restore_all_guest
#else
+ jmp restore_all_guest
+#endif
+#else
ASSERT_CONTEXT_IS_XEN
jmp restore_all_xen
#endif
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -305,7 +305,7 @@ static always_inline void stac(void)
subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp
.endm
-#ifdef CONFIG_PV
+#ifdef CONFIG_PV32
#define CR4_PV32_RESTORE \
ALTERNATIVE_2 "", \
"call cr4_pv32_restore", X86_FEATURE_XEN_SMEP, \
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v2 1/3] x86: don't build unused entry code when !PV32
2021-04-06 14:01 ` [PATCH v2 1/3] x86: don't build unused entry code when !PV32 Jan Beulich
@ 2021-04-06 16:56 ` Wei Liu
0 siblings, 0 replies; 8+ messages in thread
From: Wei Liu @ 2021-04-06 16:56 UTC (permalink / raw)
To: Jan Beulich
Cc: xen-devel, Andrew Cooper, George Dunlap, Wei Liu, Roger Pau Monné
On Tue, Apr 06, 2021 at 04:01:22PM +0200, Jan Beulich wrote:
> Except for the initial part of cstar_enter compat/entry.S is all dead
> code in this case. Further, along the lines of the PV conditionals we
> already have in entry.S, make code PV32-conditional there too (to a
> fair part because this code actually references compat/entry.S).
>
> This has the side effect of moving the tail part (now at compat_syscall)
> of the code out of .text.entry (in line with e.g. compat_sysenter).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wl@xen.org>
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH v2 2/3] x86: slim down hypercall handling when !PV32
2021-04-06 13:58 [PATCH v2 0/3] x86: asm-offsets.h and !PV32 adjustments Jan Beulich
2021-04-06 14:01 ` [PATCH v2 1/3] x86: don't build unused entry code when !PV32 Jan Beulich
@ 2021-04-06 14:01 ` Jan Beulich
2021-04-06 16:59 ` Wei Liu
2021-04-06 14:02 ` [PATCH v2 3/3] x86: avoid building COMPAT code when !HVM && !PV32 Jan Beulich
2 siblings, 1 reply; 8+ messages in thread
From: Jan Beulich @ 2021-04-06 14:01 UTC (permalink / raw)
To: xen-devel
Cc: Andrew Cooper, George Dunlap, Ian Jackson, Julien Grall,
Stefano Stabellini, Wei Liu, Roger Pau Monné
In such a build various of the compat handlers aren't needed. Don't
reference them from the hypercall table, and compile out those which
aren't needed for HVM. Also compile out switch_compat(), which has no
purpose in such a build.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -17,7 +17,8 @@ obj-bin-y += bzimage.init.o
obj-bin-y += clear_page.o
obj-bin-y += copy_page.o
obj-y += cpuid.o
-obj-$(CONFIG_PV) += compat.o x86_64/compat.o
+obj-$(CONFIG_PV) += compat.o
+obj-$(CONFIG_PV32) += x86_64/compat.o
obj-$(CONFIG_KEXEC) += crash.o
obj-$(CONFIG_GDBSX) += debug.o
obj-y += delay.o
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -121,7 +121,9 @@ static long hvm_physdev_op(int cmd, XEN_
#define do_arch_1 paging_domctl_continuation
-static const hypercall_table_t hvm_hypercall_table[] = {
+static const struct {
+ hypercall_fn_t *native, *compat;
+} hvm_hypercall_table[] = {
HVM_CALL(memory_op),
#ifdef CONFIG_GRANT_TABLE
HVM_CALL(grant_table_op),
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4498,7 +4498,9 @@ long do_update_va_mapping_otherdomain(un
return rc;
}
+#endif /* CONFIG_PV */
+#ifdef CONFIG_PV32
int compat_update_va_mapping(unsigned int va, uint32_t lo, uint32_t hi,
unsigned int flags)
{
@@ -4533,7 +4535,7 @@ int compat_update_va_mapping_otherdomain
return rc;
}
-#endif /* CONFIG_PV */
+#endif /* CONFIG_PV32 */
typedef struct e820entry e820entry_t;
DEFINE_XEN_GUEST_HANDLE(e820entry_t);
--- a/xen/arch/x86/pv/callback.c
+++ b/xen/arch/x86/pv/callback.c
@@ -19,12 +19,11 @@
#include <xen/event.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
-#include <compat/callback.h>
-#include <compat/nmi.h>
#include <asm/shared.h>
#include <public/callback.h>
+#include <public/nmi.h>
static int register_guest_nmi_callback(unsigned long address)
{
@@ -203,6 +202,11 @@ long do_set_callbacks(unsigned long even
return 0;
}
+#ifdef CONFIG_PV32
+
+#include <compat/callback.h>
+#include <compat/nmi.h>
+
static long compat_register_guest_callback(struct compat_callback_register *reg)
{
long ret = 0;
@@ -343,6 +347,8 @@ long compat_set_callbacks(unsigned long
return 0;
}
+#endif /* CONFIG_PV32 */
+
long do_set_trap_table(XEN_GUEST_HANDLE_PARAM(const_trap_info_t) traps)
{
struct trap_info cur;
@@ -388,6 +394,7 @@ long do_set_trap_table(XEN_GUEST_HANDLE_
return rc;
}
+#ifdef CONFIG_PV32
int compat_set_trap_table(XEN_GUEST_HANDLE(trap_info_compat_t) traps)
{
struct vcpu *curr = current;
@@ -429,6 +436,7 @@ int compat_set_trap_table(XEN_GUEST_HAND
return rc;
}
+#endif
long do_nmi_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
@@ -455,6 +463,7 @@ long do_nmi_op(unsigned int cmd, XEN_GUE
return rc;
}
+#ifdef CONFIG_PV32
int compat_nmi_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct compat_nmi_callback cb;
@@ -479,6 +488,7 @@ int compat_nmi_op(unsigned int cmd, XEN_
return rc;
}
+#endif
/*
* Local variables:
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -149,6 +149,8 @@ long do_set_gdt(XEN_GUEST_HANDLE_PARAM(x
return ret;
}
+#ifdef CONFIG_PV32
+
int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list,
unsigned int entries)
{
@@ -185,6 +187,18 @@ int compat_set_gdt(XEN_GUEST_HANDLE_PARA
return ret;
}
+int compat_update_descriptor(uint32_t pa_lo, uint32_t pa_hi,
+ uint32_t desc_lo, uint32_t desc_hi)
+{
+ seg_desc_t d;
+
+ d.raw = ((uint64_t)desc_hi << 32) | desc_lo;
+
+ return do_update_descriptor(pa_lo | ((uint64_t)pa_hi << 32), d);
+}
+
+#endif /* CONFIG_PV32 */
+
static bool check_descriptor(const struct domain *dom, seg_desc_t *d)
{
unsigned int a = d->a, b = d->b, cs, dpl;
@@ -334,16 +348,6 @@ long do_update_descriptor(uint64_t gaddr
return ret;
}
-int compat_update_descriptor(uint32_t pa_lo, uint32_t pa_hi,
- uint32_t desc_lo, uint32_t desc_hi)
-{
- seg_desc_t d;
-
- d.raw = ((uint64_t)desc_hi << 32) | desc_lo;
-
- return do_update_descriptor(pa_lo | ((uint64_t)pa_hi << 32), d);
-}
-
/*
* Local variables:
* mode: C
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -212,6 +212,7 @@ unsigned long pv_make_cr4(const struct v
return cr4;
}
+#ifdef CONFIG_PV32
int switch_compat(struct domain *d)
{
struct vcpu *v;
@@ -256,6 +257,7 @@ int switch_compat(struct domain *d)
return rc;
}
+#endif
static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
{
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -25,12 +25,18 @@
#include <xen/trace.h>
#include <irq_vectors.h>
+#ifdef CONFIG_PV32
#define HYPERCALL(x) \
[ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x, \
(hypercall_fn_t *) do_ ## x }
#define COMPAT_CALL(x) \
[ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x, \
(hypercall_fn_t *) compat_ ## x }
+#else
+#define HYPERCALL(x) \
+ [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x }
+#define COMPAT_CALL(x) HYPERCALL(x)
+#endif
#define do_arch_1 paging_domctl_continuation
@@ -176,6 +182,7 @@ void pv_hypercall(struct cpu_user_regs *
}
#endif
}
+#ifdef CONFIG_PV32
else
{
unsigned int ebx = regs->ebx;
@@ -225,6 +232,7 @@ void pv_hypercall(struct cpu_user_regs *
}
#endif
}
+#endif /* CONFIG_PV32 */
/*
* PV guests use SYSCALL or INT $0x82 to make a hypercall, both of which
@@ -255,7 +263,7 @@ enum mc_disposition arch_do_multicall_ca
else
call->result = -ENOSYS;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_PV32
else
{
struct compat_multicall_entry *call = &state->compat_call;
--- a/xen/arch/x86/pv/iret.c
+++ b/xen/arch/x86/pv/iret.c
@@ -104,6 +104,7 @@ unsigned long do_iret(void)
return 0;
}
+#ifdef CONFIG_PV32
unsigned int compat_iret(void)
{
struct cpu_user_regs *regs = guest_cpu_user_regs();
@@ -223,6 +224,7 @@ unsigned int compat_iret(void)
*/
return regs->eax;
}
+#endif
/*
* Local variables:
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -255,13 +255,17 @@ void __init pv_shim_setup_dom(struct dom
*/
rw_pv_hypercall_table = __va(__pa(pv_hypercall_table));
rw_pv_hypercall_table[__HYPERVISOR_event_channel_op].native =
- rw_pv_hypercall_table[__HYPERVISOR_event_channel_op].compat =
(hypercall_fn_t *)pv_shim_event_channel_op;
-
rw_pv_hypercall_table[__HYPERVISOR_grant_table_op].native =
- rw_pv_hypercall_table[__HYPERVISOR_grant_table_op].compat =
(hypercall_fn_t *)pv_shim_grant_table_op;
+#ifdef CONFIG_PV32
+ rw_pv_hypercall_table[__HYPERVISOR_event_channel_op].compat =
+ (hypercall_fn_t *)pv_shim_event_channel_op;
+ rw_pv_hypercall_table[__HYPERVISOR_grant_table_op].compat =
+ (hypercall_fn_t *)pv_shim_grant_table_op;
+#endif
+
guest = d;
/*
--- a/xen/include/asm-x86/compat.h
+++ b/xen/include/asm-x86/compat.h
@@ -6,3 +6,11 @@
typedef uint32_t compat_ptr_t;
typedef unsigned long full_ptr_t;
+
+struct domain;
+#ifdef CONFIG_PV32
+int switch_compat(struct domain *);
+#else
+#include <xen/errno.h>
+static inline int switch_compat(struct domain *d) { return -EOPNOTSUPP; }
+#endif
--- a/xen/include/asm-x86/hypercall.h
+++ b/xen/include/asm-x86/hypercall.h
@@ -16,7 +16,10 @@ typedef unsigned long hypercall_fn_t(
unsigned long, unsigned long, unsigned long);
typedef struct {
- hypercall_fn_t *native, *compat;
+ hypercall_fn_t *native;
+#ifdef CONFIG_PV32
+ hypercall_fn_t *compat;
+#endif
} hypercall_table_t;
typedef struct {
--- a/xen/include/xen/compat.h
+++ b/xen/include/xen/compat.h
@@ -227,9 +227,6 @@ void xlat_start_info(struct start_info *
struct vcpu_runstate_info;
void xlat_vcpu_runstate_info(struct vcpu_runstate_info *);
-struct domain;
-int switch_compat(struct domain *);
-
#else
#define compat_handle_is_null(hnd) 0
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v2 2/3] x86: slim down hypercall handling when !PV32
2021-04-06 14:01 ` [PATCH v2 2/3] x86: slim down hypercall handling " Jan Beulich
@ 2021-04-06 16:59 ` Wei Liu
0 siblings, 0 replies; 8+ messages in thread
From: Wei Liu @ 2021-04-06 16:59 UTC (permalink / raw)
To: Jan Beulich
Cc: xen-devel, Andrew Cooper, George Dunlap, Ian Jackson,
Julien Grall, Stefano Stabellini, Wei Liu, Roger Pau Monné
On Tue, Apr 06, 2021 at 04:01:41PM +0200, Jan Beulich wrote:
> In such a build various of the compat handlers aren't needed. Don't
> reference them from the hypercall table, and compile out those which
> aren't needed for HVM. Also compile out switch_compat(), which has no
> purpose in such a build.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wl@xen.org>
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH v2 3/3] x86: avoid building COMPAT code when !HVM && !PV32
2021-04-06 13:58 [PATCH v2 0/3] x86: asm-offsets.h and !PV32 adjustments Jan Beulich
2021-04-06 14:01 ` [PATCH v2 1/3] x86: don't build unused entry code when !PV32 Jan Beulich
2021-04-06 14:01 ` [PATCH v2 2/3] x86: slim down hypercall handling " Jan Beulich
@ 2021-04-06 14:02 ` Jan Beulich
2021-04-06 17:03 ` Wei Liu
2021-04-08 10:02 ` Jan Beulich
2 siblings, 2 replies; 8+ messages in thread
From: Jan Beulich @ 2021-04-06 14:02 UTC (permalink / raw)
To: xen-devel
Cc: Andrew Cooper, George Dunlap, Ian Jackson, Julien Grall,
Stefano Stabellini, Wei Liu, Roger Pau Monné
It was probably a mistake to, over time, drop various CONFIG_COMPAT
conditionals from x86-specific code, as we now have a build
configuration again where we'd prefer this to be unset. Arrange for
CONFIG_COMPAT to actually be off in this case, dealing with fallout.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -6,7 +6,6 @@ config X86
select ACPI
select ACPI_LEGACY_TABLES_LOOKUP
select ARCH_SUPPORTS_INT128
- select COMPAT
select CORE_PARKING
select HAS_ALTERNATIVE
select HAS_CPUFREQ
@@ -57,6 +56,7 @@ config PV32
bool "Support for 32bit PV guests"
depends on PV
default y
+ select COMPAT
---help---
The 32bit PV ABI uses Ring1, an area of the x86 architecture which
was deprecated and mostly removed in the AMD64 spec. As a result,
@@ -91,6 +91,7 @@ config PV_LINEAR_PT
config HVM
def_bool !PV_SHIM_EXCLUSIVE
+ select COMPAT
select IOREQ_SERVER
prompt "HVM support"
---help---
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -50,7 +50,8 @@ obj-y += nmi.o
obj-y += numa.o
obj-y += pci.o
obj-y += percpu.o
-obj-y += physdev.o x86_64/physdev.o
+obj-y += physdev.o
+obj-$(CONFIG_COMPAT) += x86_64/physdev.o
obj-y += psr.o
obj-y += setup.o
obj-y += shutdown.o
@@ -72,7 +73,8 @@ obj-y += xstate.o
ifneq ($(CONFIG_PV_SHIM_EXCLUSIVE),y)
obj-y += domctl.o
-obj-y += platform_hypercall.o x86_64/platform_hypercall.o
+obj-y += platform_hypercall.o
+obj-$(CONFIG_COMPAT) += x86_64/platform_hypercall.o
obj-y += sysctl.o
endif
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -1291,6 +1291,8 @@ static void x86_mc_mceinject(void *data)
#error BITS_PER_LONG definition absent
#endif
+# ifdef CONFIG_COMPAT
+
# include <compat/arch-x86/xen-mca.h>
# define xen_mcinfo_msr mcinfo_msr
@@ -1343,6 +1345,11 @@ CHECK_mcinfo_recovery;
# undef xen_page_offline_action
# undef xen_mcinfo_recovery
+# else
+# define compat_handle_is_null(h) true
+# define copy_to_compat(h, p, n) true /* really (-EFAULT), but gcc chokes */
+# endif /* CONFIG_COMPAT */
+
/* Machine Check Architecture Hypercall */
long do_mca(XEN_GUEST_HANDLE_PARAM(xen_mc_t) u_xen_mc)
{
@@ -1351,11 +1358,15 @@ long do_mca(XEN_GUEST_HANDLE_PARAM(xen_m
struct vcpu *v = current;
union {
struct xen_mc_fetch *nat;
+#ifdef CONFIG_COMPAT
struct compat_mc_fetch *cmp;
+#endif
} mc_fetch;
union {
struct xen_mc_physcpuinfo *nat;
+#ifdef CONFIG_COMPAT
struct compat_mc_physcpuinfo *cmp;
+#endif
} mc_physcpuinfo;
uint32_t flags, cmdflags;
int nlcpu;
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -39,10 +39,12 @@
#include <public/pmu.h>
#include <xsm/xsm.h>
+#ifdef CONFIG_COMPAT
#include <compat/pmu.h>
CHECK_pmu_cntr_pair;
CHECK_pmu_data;
CHECK_pmu_params;
+#endif
static unsigned int __read_mostly opt_vpmu_enabled;
unsigned int __read_mostly vpmu_mode = XENPMU_MODE_OFF;
@@ -232,6 +234,7 @@ void vpmu_do_interrupt(struct cpu_user_r
domid = sampled->domain->domain_id;
/* Store appropriate registers in xenpmu_data */
+#ifdef CONFIG_COMPAT
/* FIXME: 32-bit PVH should go here as well */
if ( is_pv_32bit_vcpu(sampling) )
{
@@ -254,6 +257,7 @@ void vpmu_do_interrupt(struct cpu_user_r
*flags |= PMU_SAMPLE_USER;
}
else
+#endif
{
struct xen_pmu_regs *r = &vpmu->xenpmu_data->pmu.r.regs;
@@ -448,7 +452,9 @@ static int vpmu_arch_initialise(struct v
BUILD_BUG_ON(sizeof(struct xen_pmu_intel_ctxt) > XENPMU_CTXT_PAD_SZ);
BUILD_BUG_ON(sizeof(struct xen_pmu_amd_ctxt) > XENPMU_CTXT_PAD_SZ);
BUILD_BUG_ON(sizeof(struct xen_pmu_regs) > XENPMU_REGS_PAD_SZ);
+#ifdef CONFIG_COMPAT
BUILD_BUG_ON(sizeof(struct compat_pmu_regs) > XENPMU_REGS_PAD_SZ);
+#endif
ASSERT(!(vpmu->flags & ~VPMU_AVAILABLE) && !vpmu->context);
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -64,7 +64,9 @@
#include <asm/amd.h>
#include <xen/numa.h>
#include <xen/iommu.h>
+#ifdef CONFIG_COMPAT
#include <compat/vcpu.h>
+#endif
#include <asm/psr.h>
#include <asm/pv/domain.h>
#include <asm/pv/mm.h>
@@ -1020,11 +1022,13 @@ void arch_domain_creation_finished(struc
hvm_domain_creation_finished(d);
}
+#ifdef CONFIG_COMPAT
#define xen_vcpu_guest_context vcpu_guest_context
#define fpu_ctxt fpu_ctxt.x
CHECK_FIELD_(struct, vcpu_guest_context, fpu_ctxt);
#undef fpu_ctxt
#undef xen_vcpu_guest_context
+#endif
/* Called by XEN_DOMCTL_setvcpucontext and VCPUOP_initialise. */
int arch_set_info_guest(
@@ -1045,7 +1049,11 @@ int arch_set_info_guest(
* we expect the tools to DTRT even in compat-mode callers. */
compat = is_pv_32bit_domain(d);
+#ifdef CONFIG_COMPAT
#define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
+#else
+#define c(fld) (c.nat->fld)
+#endif
flags = c(flags);
if ( is_pv_domain(d) )
@@ -1078,6 +1086,7 @@ int arch_set_info_guest(
if ( !__addr_ok(c.nat->ldt_base) )
return -EINVAL;
}
+#ifdef CONFIG_COMPAT
else
{
fixup_guest_stack_selector(d, c.cmp->user_regs.ss);
@@ -1089,6 +1098,7 @@ int arch_set_info_guest(
for ( i = 0; i < ARRAY_SIZE(c.cmp->trap_ctxt); i++ )
fixup_guest_code_selector(d, c.cmp->trap_ctxt[i].cs);
}
+#endif
/* LDT safety checks. */
if ( ((c(ldt_base) & (PAGE_SIZE - 1)) != 0) ||
@@ -1119,6 +1129,7 @@ int arch_set_info_guest(
memcpy(v->arch.pv.trap_ctxt, c.nat->trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
+#ifdef CONFIG_COMPAT
else
{
XLAT_cpu_user_regs(&v->arch.user_regs, &c.cmp->user_regs);
@@ -1129,6 +1140,7 @@ int arch_set_info_guest(
c.cmp->trap_ctxt + i);
}
}
+#endif
if ( v->vcpu_id == 0 && (c(vm_assist) & ~arch_vm_assist_valid_mask(d)) )
return -EINVAL;
@@ -1184,13 +1196,17 @@ int arch_set_info_guest(
pfn = pagetable_get_pfn(v->arch.guest_table_user);
fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1];
}
- } else {
+ }
+#ifdef CONFIG_COMPAT
+ else
+ {
l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn));
pfn = l4e_get_pfn(*l4tab);
unmap_domain_page(l4tab);
fail = compat_pfn_to_cr3(pfn) != c.cmp->ctrlreg[3];
}
+#endif
fail |= v->arch.pv.gdt_ents != c(gdt_ents);
for ( i = 0; !fail && i < nr_gdt_frames; ++i )
@@ -1293,6 +1309,7 @@ int arch_set_info_guest(
if ( !compat )
rc = pv_set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
+#ifdef CONFIG_COMPAT
else
{
unsigned long gdt_frames[ARRAY_SIZE(v->arch.pv.gdt_frames)];
@@ -1302,6 +1319,7 @@ int arch_set_info_guest(
rc = pv_set_gdt(v, gdt_frames, c.cmp->gdt_ents);
}
+#endif
if ( rc != 0 )
return rc;
@@ -1309,8 +1327,10 @@ int arch_set_info_guest(
if ( !compat )
cr3_mfn = _mfn(xen_cr3_to_pfn(c.nat->ctrlreg[3]));
+#ifdef CONFIG_COMPAT
else
cr3_mfn = _mfn(compat_cr3_to_pfn(c.cmp->ctrlreg[3]));
+#endif
cr3_page = get_page_from_mfn(cr3_mfn, d);
if ( !cr3_page )
@@ -1817,9 +1837,13 @@ bool update_runstate_area(struct vcpu *v
if ( VM_ASSIST(v->domain, runstate_update_flag) )
{
+#ifdef CONFIG_COMPAT
guest_handle = has_32bit_shinfo(v->domain)
? &v->runstate_guest.compat.p->state_entry_time + 1
: &v->runstate_guest.native.p->state_entry_time + 1;
+#else
+ guest_handle = &v->runstate_guest.p->state_entry_time + 1;
+#endif
guest_handle--;
runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
__raw_copy_to_guest(guest_handle,
@@ -1827,6 +1851,7 @@ bool update_runstate_area(struct vcpu *v
smp_wmb();
}
+#ifdef CONFIG_COMPAT
if ( has_32bit_shinfo(v->domain) )
{
struct compat_vcpu_runstate_info info;
@@ -1836,6 +1861,7 @@ bool update_runstate_area(struct vcpu *v
rc = true;
}
else
+#endif
rc = __copy_to_guest(runstate_guest(v), &runstate, 1) !=
sizeof(runstate);
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1384,18 +1384,24 @@ long arch_do_domctl(
return ret;
}
+#ifdef CONFIG_COMPAT
#define xen_vcpu_guest_context vcpu_guest_context
#define fpu_ctxt fpu_ctxt.x
CHECK_FIELD_(struct, vcpu_guest_context, fpu_ctxt);
#undef fpu_ctxt
#undef xen_vcpu_guest_context
+#endif
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
unsigned int i;
const struct domain *d = v->domain;
bool compat = is_pv_32bit_domain(d);
+#ifdef CONFIG_COMPAT
#define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
+#else
+#define c(fld) (c.nat->fld)
+#endif
memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt));
if ( is_pv_domain(d) )
@@ -1413,6 +1419,7 @@ void arch_get_info_guest(struct vcpu *v,
memcpy(c.nat->trap_ctxt, v->arch.pv.trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
+#ifdef CONFIG_COMPAT
else
{
XLAT_cpu_user_regs(&c.cmp->user_regs, &v->arch.user_regs);
@@ -1423,6 +1430,7 @@ void arch_get_info_guest(struct vcpu *v,
v->arch.pv.trap_ctxt + i);
}
}
+#endif
for ( i = 0; i < ARRAY_SIZE(v->arch.dr); ++i )
c(debugreg[i] = v->arch.dr[i]);
@@ -1468,8 +1476,10 @@ void arch_get_info_guest(struct vcpu *v,
c(ldt_ents = v->arch.pv.ldt_ents);
for ( i = 0; i < ARRAY_SIZE(v->arch.pv.gdt_frames); ++i )
c(gdt_frames[i] = v->arch.pv.gdt_frames[i]);
+#ifdef CONFIG_COMPAT
BUILD_BUG_ON(ARRAY_SIZE(c.nat->gdt_frames) !=
ARRAY_SIZE(c.cmp->gdt_frames));
+#endif
for ( ; i < ARRAY_SIZE(c.nat->gdt_frames); ++i )
c(gdt_frames[i] = 0);
c(gdt_ents = v->arch.pv.gdt_ents);
@@ -1504,6 +1514,7 @@ void arch_get_info_guest(struct vcpu *v,
pagetable_is_null(v->arch.guest_table_user) ? 0
: xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user));
}
+#ifdef CONFIG_COMPAT
else
{
const l4_pgentry_t *l4e =
@@ -1512,6 +1523,7 @@ void arch_get_info_guest(struct vcpu *v,
c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
unmap_domain_page(l4e);
}
+#endif
if ( guest_kernel_mode(v, &v->arch.user_regs) )
c(flags |= VGCF_in_kernel);
--- a/xen/arch/x86/efi/Makefile
+++ b/xen/arch/x86/efi/Makefile
@@ -8,13 +8,14 @@ cmd_objcopy_o_ihex = $(OBJCOPY) -I ihex
boot.init.o: buildid.o
-EFIOBJ := boot.init.o pe.init.o ebmalloc.o compat.o runtime.o
+EFIOBJ-y := boot.init.o pe.init.o ebmalloc.o runtime.o
+EFIOBJ-$(CONFIG_COMPAT) += compat.o
$(call cc-option-add,cflags-stack-boundary,CC,-mpreferred-stack-boundary=4)
-$(EFIOBJ): CFLAGS-stack-boundary := $(cflags-stack-boundary)
+$(EFIOBJ-y): CFLAGS-stack-boundary := $(cflags-stack-boundary)
obj-y := stub.o
-obj-$(XEN_BUILD_EFI) := $(filter-out %.init.o,$(EFIOBJ))
-obj-bin-$(XEN_BUILD_EFI) := $(filter %.init.o,$(EFIOBJ))
+obj-$(XEN_BUILD_EFI) := $(filter-out %.init.o,$(EFIOBJ-y))
+obj-bin-$(XEN_BUILD_EFI) := $(filter %.init.o,$(EFIOBJ-y))
extra-$(XEN_BUILD_EFI) += buildid.o relocs-dummy.o
nocov-$(XEN_BUILD_EFI) += stub.o
--- a/xen/arch/x86/hypercall.c
+++ b/xen/arch/x86/hypercall.c
@@ -21,10 +21,15 @@
#include <xen/hypercall.h>
+#ifdef CONFIG_COMPAT
#define ARGS(x, n) \
[ __HYPERVISOR_ ## x ] = { n, n }
#define COMP(x, n, c) \
[ __HYPERVISOR_ ## x ] = { n, c }
+#else
+#define ARGS(x, n) [ __HYPERVISOR_ ## x ] = { n }
+#define COMP(x, n, c) ARGS(x, n)
+#endif
const hypercall_args_t hypercall_args_table[NR_hypercalls] =
{
@@ -113,7 +118,11 @@ unsigned long hypercall_create_continuat
regs->rax = op;
+#ifdef CONFIG_COMPAT
if ( !curr->hcall_compat )
+#else
+ if ( true )
+#endif
{
for ( i = 0; *p != '\0'; i++ )
{
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -877,9 +877,11 @@ int __init dom0_construct_pv(struct doma
pv_shim_setup_dom(d, l4start, v_start, vxenstore_start, vconsole_start,
vphysmap_start, si);
+#ifdef CONFIG_COMPAT
if ( is_pv_32bit_domain(d) )
xlat_start_info(si, pv_shim ? XLAT_start_info_console_domU
: XLAT_start_info_console_dom0);
+#endif
/* Return to idle domain's page tables. */
mapcache_override_current(NULL);
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -27,8 +27,10 @@
#include <xen/virtual_region.h>
#include <xen/watchdog.h>
#include <public/version.h>
+#ifdef CONFIG_COMPAT
#include <compat/platform.h>
#include <compat/xen.h>
+#endif
#include <xen/bitops.h>
#include <asm/smp.h>
#include <asm/processor.h>
@@ -1615,10 +1617,12 @@ void __init noreturn __start_xen(unsigne
BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
BUILD_BUG_ON(sizeof(struct vcpu_info) != 64);
+#ifdef CONFIG_COMPAT
BUILD_BUG_ON(sizeof_field(struct compat_platform_op, u) !=
sizeof_field(struct compat_platform_op, u.pad));
BUILD_BUG_ON(sizeof(start_info_compat_t) > PAGE_SIZE);
BUILD_BUG_ON(sizeof(struct compat_vcpu_info) != 64);
+#endif
/* Check definitions in public headers match internal defs. */
BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
--- a/xen/arch/x86/oprofile/backtrace.c
+++ b/xen/arch/x86/oprofile/backtrace.c
@@ -27,7 +27,6 @@ struct __packed frame_head_32bit {
uint32_t ret;
};
typedef struct frame_head_32bit frame_head32_t;
-DEFINE_COMPAT_HANDLE(frame_head32_t);
static struct frame_head *
dump_hypervisor_backtrace(struct vcpu *vcpu, const struct frame_head *head,
@@ -58,8 +57,10 @@ dump_guest_backtrace(struct vcpu *vcpu,
{
frame_head_t bufhead;
+#ifdef CONFIG_COMPAT
if ( is_32bit_vcpu(vcpu) )
{
+ DEFINE_COMPAT_HANDLE(frame_head32_t);
__compat_handle_const_frame_head32_t guest_head =
{ .c = (unsigned long)head };
frame_head32_t bufhead32;
@@ -73,6 +74,7 @@ dump_guest_backtrace(struct vcpu *vcpu,
bufhead.ret = bufhead32.ret;
}
else
+#endif
{
XEN_GUEST_HANDLE_PARAM(const_frame_head_t) guest_head =
const_guest_handle_from_ptr(head, frame_head_t);
--- a/xen/arch/x86/oprofile/xenoprof.c
+++ b/xen/arch/x86/oprofile/xenoprof.c
@@ -12,7 +12,6 @@
#include <xen/sched.h>
#include <xen/xenoprof.h>
#include <public/xenoprof.h>
-#include <compat/xenoprof.h>
#include <asm/hvm/support.h>
#include "op_counter.h"
@@ -54,6 +53,9 @@ int xenoprof_arch_ibs_counter(XEN_GUEST_
return 0;
}
+#ifdef CONFIG_COMPAT
+#include <compat/xenoprof.h>
+
int compat_oprof_arch_counter(XEN_GUEST_HANDLE_PARAM(void) arg)
{
struct compat_oprof_counter counter;
@@ -73,6 +75,7 @@ int compat_oprof_arch_counter(XEN_GUEST_
return 0;
}
+#endif
int xenoprofile_get_mode(struct vcpu *curr, const struct cpu_user_regs *regs)
{
--- a/xen/arch/x86/x86_64/Makefile
+++ b/xen/arch/x86/x86_64/Makefile
@@ -8,9 +8,9 @@ obj-y += acpi_mmcfg.o
obj-y += mmconf-fam10h.o
obj-y += mmconfig_64.o
obj-y += mmconfig-shared.o
-obj-y += domain.o
-obj-y += cpu_idle.o
-obj-y += cpufreq.o
+obj-$(CONFIG_COMPAT) += domain.o
+obj-$(CONFIG_COMPAT) += cpu_idle.o
+obj-$(CONFIG_COMPAT) += cpufreq.o
obj-bin-$(CONFIG_KEXEC) += kexec_reloc.o
obj-$(CONFIG_CRASH_DEBUG) += gdbstub.o
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1347,7 +1347,9 @@ void set_gpfn_from_mfn(unsigned long mfn
machine_to_phys_mapping[mfn] = entry;
}
+#ifdef CONFIG_COMPAT
#include "compat/mm.c"
+#endif
/*
* Local variables:
--- a/xen/common/time.c
+++ b/xen/common/time.c
@@ -108,7 +108,7 @@ void update_domain_wallclock_time(struct
sec = wc_sec + d->time_offset.seconds;
shared_info(d, wc_sec) = sec;
shared_info(d, wc_nsec) = wc_nsec;
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86) && defined(CONFIG_COMPAT)
if ( likely(!has_32bit_shinfo(d)) )
d->shared_info->native.wc_sec_hi = sec >> 32;
else
--- a/xen/include/asm-x86/compat.h
+++ b/xen/include/asm-x86/compat.h
@@ -2,11 +2,15 @@
* compat.h
*/
+#ifdef CONFIG_COMPAT
+
#define COMPAT_BITS_PER_LONG 32
typedef uint32_t compat_ptr_t;
typedef unsigned long full_ptr_t;
+#endif
+
struct domain;
#ifdef CONFIG_PV32
int switch_compat(struct domain *);
--- a/xen/include/asm-x86/hypercall.h
+++ b/xen/include/asm-x86/hypercall.h
@@ -23,7 +23,10 @@ typedef struct {
} hypercall_table_t;
typedef struct {
- uint8_t native, compat;
+ uint8_t native;
+#ifdef CONFIG_COMPAT
+ uint8_t compat;
+#endif
} hypercall_args_t;
extern const hypercall_args_t hypercall_args_table[NR_hypercalls];
--- a/xen/include/asm-x86/shared.h
+++ b/xen/include/asm-x86/shared.h
@@ -1,6 +1,8 @@
#ifndef __XEN_X86_SHARED_H__
#define __XEN_X86_SHARED_H__
+#ifdef CONFIG_COMPAT
+
#define nmi_reason(d) (!has_32bit_shinfo(d) ? \
(u32 *)&(d)->shared_info->native.arch.nmi_reason : \
(u32 *)&(d)->shared_info->compat.arch.nmi_reason)
@@ -37,6 +39,34 @@ static inline void arch_set_##field(stru
v->vcpu_info->compat.arch.field = val; \
}
+#else
+
+#define nmi_reason(d) (&(d)->shared_info->arch.nmi_reason)
+
+#define GET_SET_SHARED(type, field) \
+static inline type arch_get_##field(const struct domain *d) \
+{ \
+ return d->shared_info->arch.field; \
+} \
+static inline void arch_set_##field(struct domain *d, \
+ type val) \
+{ \
+ d->shared_info->arch.field = val; \
+}
+
+#define GET_SET_VCPU(type, field) \
+static inline type arch_get_##field(const struct vcpu *v) \
+{ \
+ return v->vcpu_info->arch.field; \
+} \
+static inline void arch_set_##field(struct vcpu *v, \
+ type val) \
+{ \
+ v->vcpu_info->arch.field = val; \
+}
+
+#endif
+
GET_SET_SHARED(unsigned long, max_pfn)
GET_SET_SHARED(xen_pfn_t, pfn_to_mfn_frame_list_list)
GET_SET_SHARED(unsigned long, nmi_reason)
--- a/xen/include/xen/compat.h
+++ b/xen/include/xen/compat.h
@@ -5,10 +5,11 @@
#ifndef __XEN_COMPAT_H__
#define __XEN_COMPAT_H__
-#ifdef CONFIG_COMPAT
-
#include <xen/types.h>
#include <asm/compat.h>
+
+#ifdef CONFIG_COMPAT
+
#include <compat/xlat.h>
#define __DEFINE_COMPAT_HANDLE(name, type) \
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -1047,7 +1047,6 @@ static always_inline bool is_pv_vcpu(con
return is_pv_domain(v->domain);
}
-#ifdef CONFIG_COMPAT
static always_inline bool is_pv_32bit_domain(const struct domain *d)
{
#ifdef CONFIG_PV32
@@ -1078,7 +1077,7 @@ static always_inline bool is_pv_64bit_vc
{
return is_pv_64bit_domain(v->domain);
}
-#endif
+
static always_inline bool is_hvm_domain(const struct domain *d)
{
return IS_ENABLED(CONFIG_HVM) &&
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v2 3/3] x86: avoid building COMPAT code when !HVM && !PV32
2021-04-06 14:02 ` [PATCH v2 3/3] x86: avoid building COMPAT code when !HVM && !PV32 Jan Beulich
@ 2021-04-06 17:03 ` Wei Liu
2021-04-08 10:02 ` Jan Beulich
1 sibling, 0 replies; 8+ messages in thread
From: Wei Liu @ 2021-04-06 17:03 UTC (permalink / raw)
To: Jan Beulich
Cc: xen-devel, Andrew Cooper, George Dunlap, Ian Jackson,
Julien Grall, Stefano Stabellini, Wei Liu, Roger Pau Monné
On Tue, Apr 06, 2021 at 04:02:08PM +0200, Jan Beulich wrote:
> It was probably a mistake to, over time, drop various CONFIG_COMPAT
> conditionals from x86-specific code, as we now have a build
> configuration again where we'd prefer this to be unset. Arrange for
> CONFIG_COMPAT to actually be off in this case, dealing with fallout.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wl@xen.org>
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [PATCH v2 3/3] x86: avoid building COMPAT code when !HVM && !PV32
2021-04-06 14:02 ` [PATCH v2 3/3] x86: avoid building COMPAT code when !HVM && !PV32 Jan Beulich
2021-04-06 17:03 ` Wei Liu
@ 2021-04-08 10:02 ` Jan Beulich
1 sibling, 0 replies; 8+ messages in thread
From: Jan Beulich @ 2021-04-08 10:02 UTC (permalink / raw)
To: xen-devel
Cc: Andrew Cooper, George Dunlap, Ian Jackson, Julien Grall,
Stefano Stabellini, Wei Liu, Roger Pau Monné
On 06.04.2021 16:02, Jan Beulich wrote:
> --- a/xen/include/xen/compat.h
> +++ b/xen/include/xen/compat.h
> @@ -5,10 +5,11 @@
> #ifndef __XEN_COMPAT_H__
> #define __XEN_COMPAT_H__
>
> -#ifdef CONFIG_COMPAT
> -
> #include <xen/types.h>
> #include <asm/compat.h>
> +
> +#ifdef CONFIG_COMPAT
> +
> #include <compat/xlat.h>
>
> #define __DEFINE_COMPAT_HANDLE(name, type) \
As I've just noticed, this breaks the Arm build due to the lack of
asm/compat.h there. I'll be folding in the hunks below; an alternative
would be to require every arch to have a (perhaps empty) compat.h,
which seems less desirable to me.
Jan
--- unstable.orig/xen/arch/x86/Kconfig
+++ unstable/xen/arch/x86/Kconfig
@@ -9,6 +9,7 @@ config X86
select ARCH_SUPPORTS_INT128
select CORE_PARKING
select HAS_ALTERNATIVE
+ select HAS_COMPAT
select HAS_CPUFREQ
select HAS_EHCI
select HAS_EX_TABLE
--- unstable.orig/xen/common/Kconfig
+++ unstable/xen/common/Kconfig
@@ -25,6 +25,9 @@ config GRANT_TABLE
config HAS_ALTERNATIVE
bool
+config HAS_COMPAT
+ bool
+
config HAS_DEVICE_TREE
bool
--- unstable.orig/xen/include/xen/compat.h
+++ unstable/xen/include/xen/compat.h
@@ -6,7 +6,9 @@
#define __XEN_COMPAT_H__
#include <xen/types.h>
+#ifdef CONFIG_HAS_COMPAT
#include <asm/compat.h>
+#endif
#ifdef CONFIG_COMPAT
^ permalink raw reply [flat|nested] 8+ messages in thread