From: Jan Beulich <jbeulich@suse.com>
To: "Roger Pau Monné" <roger.pau@citrix.com>,
"Andrew Cooper" <andrew.cooper3@citrix.com>
Cc: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>,
Wei Liu <wl@xen.org>
Subject: Re: [PATCH 5/5] x86: don't build unused entry code when !PV32
Date: Mon, 4 Jan 2021 14:56:12 +0100 [thread overview]
Message-ID: <0341c1f1-dc50-552c-f246-56605ae7c83a@suse.com> (raw)
In-Reply-To: <20201228153004.qip3v6er5rk22fnu@Air-de-Roger>
On 28.12.2020 16:30, Roger Pau Monné wrote:
> On Wed, Nov 25, 2020 at 09:51:33AM +0100, Jan Beulich wrote:
>> --- a/xen/arch/x86/x86_64/compat/entry.S
>> +++ b/xen/arch/x86/x86_64/compat/entry.S
>> @@ -29,8 +29,6 @@ ENTRY(entry_int82)
>> mov %rsp, %rdi
>> call do_entry_int82
>>
>> -#endif /* CONFIG_PV32 */
>> -
>> /* %rbx: struct vcpu */
>> ENTRY(compat_test_all_events)
>> ASSERT_NOT_IN_ATOMIC
>> @@ -197,6 +195,8 @@ ENTRY(cr4_pv32_restore)
>> xor %eax, %eax
>> ret
>>
>> +#endif /* CONFIG_PV32 */
>
> I've also wondered: it feels weird to add CONFIG_PV32 gates to the
> compat entry.S, since that's supposed to be only used when there's
> support for 32bit PV guests?
>
> Wouldn't this file only get built when such support is enabled?
No. We need cstar_enter also for 64-bit guests' 32-bit
user space possibly making system calls via SYSCALL.
>> --- a/xen/arch/x86/x86_64/entry.S
>> +++ b/xen/arch/x86/x86_64/entry.S
>> @@ -328,8 +328,10 @@ UNLIKELY_END(sysenter_gpf)
>> movq VCPU_domain(%rbx),%rdi
>> movq %rax,TRAPBOUNCE_eip(%rdx)
>> movb %cl,TRAPBOUNCE_flags(%rdx)
>> +#ifdef CONFIG_PV32
>> cmpb $0, DOMAIN_is_32bit_pv(%rdi)
>> jne compat_sysenter
>> +#endif
>> jmp .Lbounce_exception
>>
>> ENTRY(int80_direct_trap)
>> @@ -370,6 +372,7 @@ UNLIKELY_END(msi_check)
>> mov 0x80 * TRAPINFO_sizeof + TRAPINFO_eip(%rsi), %rdi
>> movzwl 0x80 * TRAPINFO_sizeof + TRAPINFO_cs (%rsi), %ecx
>>
>> +#ifdef CONFIG_PV32
>> mov %ecx, %edx
>> and $~3, %edx
>>
>> @@ -378,6 +381,10 @@ UNLIKELY_END(msi_check)
>>
>> test %rdx, %rdx
>> jz int80_slow_path
>> +#else
>> + test %rdi, %rdi
>> + jz int80_slow_path
>> +#endif
>>
>> /* Construct trap_bounce from trap_ctxt[0x80]. */
>> lea VCPU_trap_bounce(%rbx), %rdx
>> @@ -390,8 +397,10 @@ UNLIKELY_END(msi_check)
>> lea (, %rcx, TBF_INTERRUPT), %ecx
>> mov %cl, TRAPBOUNCE_flags(%rdx)
>>
>> +#ifdef CONFIG_PV32
>> cmpb $0, DOMAIN_is_32bit_pv(%rax)
>> jne compat_int80_direct_trap
>> +#endif
>>
>> call create_bounce_frame
>> jmp test_all_events
>> @@ -541,12 +550,16 @@ ENTRY(dom_crash_sync_extable)
>> GET_STACK_END(ax)
>> leaq STACK_CPUINFO_FIELD(guest_cpu_user_regs)(%rax),%rsp
>> # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
>> +#ifdef CONFIG_PV32
>> movq STACK_CPUINFO_FIELD(current_vcpu)(%rax), %rax
>> movq VCPU_domain(%rax),%rax
>> cmpb $0, DOMAIN_is_32bit_pv(%rax)
>> sete %al
>> leal (%rax,%rax,2),%eax
>> orb %al,UREGS_cs(%rsp)
>> +#else
>> + orb $3, UREGS_cs(%rsp)
>> +#endif
>> xorl %edi,%edi
>> jmp asm_domain_crash_synchronous /* Does not return */
>> .popsection
>> @@ -562,11 +575,15 @@ ENTRY(ret_from_intr)
>> GET_CURRENT(bx)
>> testb $3, UREGS_cs(%rsp)
>> jz restore_all_xen
>> +#ifdef CONFIG_PV32
>> movq VCPU_domain(%rbx), %rax
>> cmpb $0, DOMAIN_is_32bit_pv(%rax)
>> je test_all_events
>> jmp compat_test_all_events
>> #else
>> + jmp test_all_events
>> +#endif
>> +#else
>> ASSERT_CONTEXT_IS_XEN
>> jmp restore_all_xen
>> #endif
>> @@ -652,7 +669,7 @@ handle_exception_saved:
>> testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
>> jz exception_with_ints_disabled
>>
>> -#ifdef CONFIG_PV
>> +#if defined(CONFIG_PV32)
>> ALTERNATIVE_2 "jmp .Lcr4_pv32_done", \
>> __stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMEP, \
>> __stringify(mov VCPU_domain(%rbx), %rax), X86_FEATURE_XEN_SMAP
>> @@ -692,7 +709,7 @@ handle_exception_saved:
>> test $~(PFEC_write_access|PFEC_insn_fetch),%eax
>> jz compat_test_all_events
>> .Lcr4_pv32_done:
>> -#else
>> +#elif !defined(CONFIG_PV)
>> ASSERT_CONTEXT_IS_XEN
>> #endif /* CONFIG_PV */
>> sti
>> @@ -711,9 +728,11 @@ handle_exception_saved:
>> #ifdef CONFIG_PV
>> testb $3,UREGS_cs(%rsp)
>> jz restore_all_xen
>> +#ifdef CONFIG_PV32
>> movq VCPU_domain(%rbx),%rax
>> cmpb $0, DOMAIN_is_32bit_pv(%rax)
>> jne compat_test_all_events
>> +#endif
>> jmp test_all_events
>> #else
>> ASSERT_CONTEXT_IS_XEN
>> @@ -947,11 +966,16 @@ handle_ist_exception:
>> je 1f
>> movl $EVENT_CHECK_VECTOR,%edi
>> call send_IPI_self
>> -1: movq VCPU_domain(%rbx),%rax
>> +1:
>> +#ifdef CONFIG_PV32
>> + movq VCPU_domain(%rbx),%rax
>> cmpb $0,DOMAIN_is_32bit_pv(%rax)
>> je restore_all_guest
>> jmp compat_restore_all_guest
>> #else
>> + jmp restore_all_guest
>> +#endif
>> +#else
>> ASSERT_CONTEXT_IS_XEN
>> jmp restore_all_xen
>> #endif
>
> I would like to have Andrew's opinion on this one (as you and him tend
> to modify more asm code than myself). There are quite a lot of
> additions to the assembly code, and IMO it makes the code more complex
> which I think we should try to avoid, as assembly is already hard
> enough.
Well, while I can see your point (and I indeed asked myself the same
question when making this change), this merely follows the route
started with the addition of CONFIG_PV conditionals. If we think that
prior step didn't set a good precedent, we ought to undo it.
Otherwise I see no good argument against doing the same kind of
transformation a 2nd time (and further ones, if need be down the
road).
Jan
next prev parent reply other threads:[~2021-01-04 13:56 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-11-25 8:42 [PATCH 0/5] x86: asm-offsets.h and !PV32 adjustments Jan Beulich
2020-11-25 8:45 ` [PATCH 1/5] x86/build: limit rebuilding of asm-offsets.h Jan Beulich
2020-12-28 12:00 ` Roger Pau Monné
2021-01-04 13:46 ` Jan Beulich
2020-11-25 8:49 ` [PATCH 2/5] x86/build: limit #include-ing by asm-offsets.c Jan Beulich
2020-12-28 12:54 ` Roger Pau Monné
2021-01-04 13:48 ` Jan Beulich
2020-11-25 8:49 ` [PATCH 3/5] x86/build: restrict contents of asm-offsets.h when !HVM / !PV Jan Beulich
2020-12-28 13:07 ` Roger Pau Monné
2020-11-25 8:50 ` [PATCH 4/5] x86: hypercall vector is unused when !PV32 Jan Beulich
2020-12-28 13:37 ` Roger Pau Monné
2020-11-25 8:51 ` [PATCH 5/5] x86: don't build unused entry code " Jan Beulich
2020-12-28 15:30 ` Roger Pau Monné
2021-01-04 13:56 ` Jan Beulich [this message]
2021-01-04 15:53 ` Roger Pau Monné
2021-01-04 16:11 ` Jan Beulich
2021-04-01 7:51 ` Ping: " Jan Beulich
2021-04-01 14:01 ` Roger Pau Monné
2021-04-01 14:20 ` Jan Beulich
2021-04-06 9:52 ` Jan Beulich
2021-04-01 14:31 ` Andrew Cooper
2021-04-01 14:37 ` Jan Beulich
2021-04-06 17:34 ` Andrew Cooper
2021-04-07 7:54 ` Jan Beulich
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=0341c1f1-dc50-552c-f246-56605ae7c83a@suse.com \
--to=jbeulich@suse.com \
--cc=andrew.cooper3@citrix.com \
--cc=roger.pau@citrix.com \
--cc=wl@xen.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).