From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, linux-arch@vger.kernel.org,
Will Deacon <will@kernel.org>, Arnd Bergmann <arnd@arndb.de>,
Mark Rutland <mark.rutland@arm.com>,
Kees Cook <keescook@chromium.org>,
Keno Fischer <keno@juliacomputing.com>,
Paolo Bonzini <pbonzini@redhat.com>,
kvm@vger.kernel.org,
Gabriel Krisman Bertazi <krisman@collabora.com>,
Sean Christopherson <sean.j.christopherson@intel.com>
Subject: [patch V5 07/15] x86/entry: Consolidate 32/64 bit syscall entry
Date: Thu, 23 Jul 2020 00:00:01 +0200 [thread overview]
Message-ID: <20200722220520.051234096@linutronix.de> (raw)
In-Reply-To: <20200722215954.464281930@linutronix.de>
From: Thomas Gleixner <tglx@linutronix.de>
64bit and 32bit entry code have the same open coded syscall entry handling
after the bitwidth specific bits.
Move it to a helper function and share the code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/entry/common.c | 93 +++++++++++++++++++++---------------------------
1 file changed, 41 insertions(+), 52 deletions(-)
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -366,8 +366,7 @@ static void __syscall_return_slowpath(st
exit_to_user_mode();
}
-#ifdef CONFIG_X86_64
-__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+static noinstr long syscall_enter(struct pt_regs *regs, unsigned long nr)
{
struct thread_info *ti;
@@ -379,6 +378,16 @@ static void __syscall_return_slowpath(st
if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
nr = syscall_trace_enter(regs);
+ instrumentation_end();
+ return nr;
+}
+
+#ifdef CONFIG_X86_64
+__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+{
+ nr = syscall_enter(regs, nr);
+
+ instrumentation_begin();
if (likely(nr < NR_syscalls)) {
nr = array_index_nospec(nr, NR_syscalls);
regs->ax = sys_call_table[nr](regs);
@@ -390,64 +399,53 @@ static void __syscall_return_slowpath(st
regs->ax = x32_sys_call_table[nr](regs);
#endif
}
- __syscall_return_slowpath(regs);
-
instrumentation_end();
- exit_to_user_mode();
+ syscall_return_slowpath(regs);
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_IA32_EMULATION))
+ current_thread_info()->status |= TS_COMPAT;
+ /*
+ * Subtlety here: if ptrace pokes something larger than 2^32-1 into
+ * orig_ax, the unsigned int return value truncates it. This may
+ * or may not be necessary, but it matches the old asm behavior.
+ */
+ return syscall_enter(regs, (unsigned int)regs->orig_ax);
+}
+
/*
- * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
- * all entry and exit work and returns with IRQs off. This function is
- * extremely hot in workloads that use it, and it's usually called from
- * do_fast_syscall_32, so forcibly inline it to improve performance.
+ * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
*/
-static void do_syscall_32_irqs_on(struct pt_regs *regs)
+static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
+ unsigned int nr)
{
- struct thread_info *ti = current_thread_info();
- unsigned int nr = (unsigned int)regs->orig_ax;
-
-#ifdef CONFIG_IA32_EMULATION
- ti->status |= TS_COMPAT;
-#endif
-
- if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
- /*
- * Subtlety here: if ptrace pokes something larger than
- * 2^32-1 into orig_ax, this truncates it. This may or
- * may not be necessary, but it matches the old asm
- * behavior.
- */
- nr = syscall_trace_enter(regs);
- }
-
if (likely(nr < IA32_NR_syscalls)) {
+ instrumentation_begin();
nr = array_index_nospec(nr, IA32_NR_syscalls);
regs->ax = ia32_sys_call_table[nr](regs);
+ instrumentation_end();
}
-
- __syscall_return_slowpath(regs);
}
/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
- enter_from_user_mode(regs);
- instrumentation_begin();
-
- local_irq_enable();
- do_syscall_32_irqs_on(regs);
+ unsigned int nr = syscall_32_enter(regs);
- instrumentation_end();
- exit_to_user_mode();
+ do_syscall_32_irqs_on(regs, nr);
+ syscall_return_slowpath(regs);
}
-static bool __do_fast_syscall_32(struct pt_regs *regs)
+static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
+ unsigned int nr = syscall_32_enter(regs);
int res;
+ instrumentation_begin();
/* Fetch EBP from where the vDSO stashed it. */
if (IS_ENABLED(CONFIG_X86_64)) {
/*
@@ -460,17 +458,18 @@ static bool __do_fast_syscall_32(struct
res = get_user(*(u32 *)&regs->bp,
(u32 __user __force *)(unsigned long)(u32)regs->sp);
}
+ instrumentation_end();
if (res) {
/* User code screwed up. */
regs->ax = -EFAULT;
- local_irq_disable();
- __prepare_exit_to_usermode(regs);
+ syscall_return_slowpath(regs);
return false;
}
/* Now this is just like a normal syscall. */
- do_syscall_32_irqs_on(regs);
+ do_syscall_32_irqs_on(regs, nr);
+ syscall_return_slowpath(regs);
return true;
}
@@ -483,7 +482,6 @@ static bool __do_fast_syscall_32(struct
*/
unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
vdso_image_32.sym_int80_landing_pad;
- bool success;
/*
* SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
@@ -492,17 +490,8 @@ static bool __do_fast_syscall_32(struct
*/
regs->ip = landing_pad;
- enter_from_user_mode(regs);
- instrumentation_begin();
-
- local_irq_enable();
- success = __do_fast_syscall_32(regs);
-
- instrumentation_end();
- exit_to_user_mode();
-
- /* If it failed, keep it simple: use IRET. */
- if (!success)
+ /* Invoke the syscall. If it failed, keep it simple: use IRET. */
+ if (!__do_fast_syscall_32(regs))
return 0;
#ifdef CONFIG_X86_64
next prev parent reply other threads:[~2020-07-22 22:11 UTC|newest]
Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-22 21:59 [patch V5 00/15] entry, x86, kvm: Generic entry/exit functionality for host and guest Thomas Gleixner
2020-07-22 21:59 ` [patch V5 01/15] seccomp: Provide stub for __secure_computing() Thomas Gleixner
2020-07-24 19:08 ` [tip: core/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 21:59 ` [patch V5 02/15] entry: Provide generic syscall entry functionality Thomas Gleixner
2020-07-24 19:08 ` [tip: core/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 21:59 ` [patch V5 03/15] entry: Provide generic syscall exit function Thomas Gleixner
2020-07-24 19:08 ` [tip: core/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 21:59 ` [patch V5 04/15] entry: Provide generic interrupt entry/exit code Thomas Gleixner
2020-07-24 19:08 ` [tip: core/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 21:59 ` [patch V5 05/15] entry: Provide infrastructure for work before transitioning to guest mode Thomas Gleixner
2020-07-24 19:08 ` [tip: core/entry] " tip-bot2 for Thomas Gleixner
2020-07-29 16:55 ` [patch V5 05/15] " Qian Cai
2020-07-30 7:19 ` Thomas Gleixner
2020-07-30 10:34 ` [tip: x86/entry] x86/kvm: Use __xfer_to_guest_mode_work_pending() in kvm_run_vcpu() tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` [patch V5 06/15] x86/entry: Consolidate check_user_regs() Thomas Gleixner
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` Thomas Gleixner [this message]
2020-07-24 20:11 ` [tip: x86/entry] x86/entry: Consolidate 32/64 bit syscall entry tip-bot2 for Thomas Gleixner
2020-07-26 18:33 ` Brian Gerst
2020-07-27 13:38 ` Thomas Gleixner
2020-07-22 22:00 ` [patch V5 08/15] x86/entry: Move user return notifier out of loop Thomas Gleixner
2020-07-23 23:41 ` Sean Christopherson
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` [patch V5 09/15] x86/ptrace: Provide pt_regs helper for entry/exit Thomas Gleixner
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` [patch V5 10/15] x86/entry: Use generic syscall entry function Thomas Gleixner
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` [patch V5 11/15] x86/entry: Use generic syscall exit functionality Thomas Gleixner
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` [patch V5 12/15] x86/entry: Cleanup idtentry_entry/exit_user Thomas Gleixner
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` [patch V5 13/15] x86/entry: Use generic interrupt entry/exit code Thomas Gleixner
2020-07-24 14:28 ` Ingo Molnar
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` [patch V5 14/15] x86/entry: Cleanup idtentry_enter/exit Thomas Gleixner
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-22 22:00 ` [patch V5 15/15] x86/kvm: Use generic xfer to guest work function Thomas Gleixner
2020-07-24 0:17 ` Sean Christopherson
2020-07-24 0:46 ` Thomas Gleixner
2020-07-24 0:55 ` Sean Christopherson
2020-07-24 14:24 ` Ingo Molnar
2020-07-24 19:08 ` Thomas Gleixner
2020-07-24 20:11 ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-07-24 20:51 ` [patch V5 00/15] entry, x86, kvm: Generic entry/exit functionality for host and guest Thomas Gleixner
2020-07-29 13:39 ` Steven Price
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200722220520.051234096@linutronix.de \
--to=tglx@linutronix.de \
--cc=arnd@arndb.de \
--cc=keescook@chromium.org \
--cc=keno@juliacomputing.com \
--cc=krisman@collabora.com \
--cc=kvm@vger.kernel.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mark.rutland@arm.com \
--cc=pbonzini@redhat.com \
--cc=sean.j.christopherson@intel.com \
--cc=will@kernel.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).