All of lore.kernel.org
 help / color / mirror / Atom feed
* [luto:x86/entry 5/10] arch/x86/entry/entry_32.S:930: Error: invalid operands (*ABS* and *UND* sections) for `|'
@ 2020-06-11 12:35 kernel test robot
  0 siblings, 0 replies; only message in thread
From: kernel test robot @ 2020-06-11 12:35 UTC (permalink / raw)
  To: kbuild-all

[-- Attachment #1: Type: text/plain, Size: 6650 bytes --]

tree:   https://git.kernel.org/pub/scm/linux/kernel/git/luto/linux.git x86/entry
head:   69982f2c73a199503541565a5533583ef07adcf7
commit: bad87f44f688bdddcb377bc4434c90d1c01e945e [5/10] x86/entry: Use the high bits of regs->cs to store the entry type
config: i386-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-13) 9.3.0
reproduce (this is a W=1 build):
        git checkout bad87f44f688bdddcb377bc4434c90d1c01e945e
        # save the attached .config to linux build tree
        make W=1 ARCH=i386 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>, old ones prefixed by <<):

arch/x86/entry/entry_32.S: Assembler messages:
>> arch/x86/entry/entry_32.S:930: Error: invalid operands (*ABS* and *UND* sections) for `|'

vim +930 arch/x86/entry/entry_32.S

   876	
   877	/*
   878	 * 32-bit SYSENTER entry.
   879	 *
   880	 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
   881	 * if X86_FEATURE_SEP is available.  This is the preferred system call
   882	 * entry on 32-bit systems.
   883	 *
   884	 * The SYSENTER instruction, in principle, should *only* occur in the
   885	 * vDSO.  In practice, a small number of Android devices were shipped
   886	 * with a copy of Bionic that inlined a SYSENTER instruction.  This
   887	 * never happened in any of Google's Bionic versions -- it only happened
   888	 * in a narrow range of Intel-provided versions.
   889	 *
   890	 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
   891	 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
   892	 * SYSENTER does not save anything on the stack,
   893	 * and does not save old EIP (!!!), ESP, or EFLAGS.
   894	 *
   895	 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
   896	 * user and/or vm86 state), we explicitly disable the SYSENTER
   897	 * instruction in vm86 mode by reprogramming the MSRs.
   898	 *
   899	 * Arguments:
   900	 * eax  system call number
   901	 * ebx  arg1
   902	 * ecx  arg2
   903	 * edx  arg3
   904	 * esi  arg4
   905	 * edi  arg5
   906	 * ebp  user stack
   907	 * 0(%ebp) arg6
   908	 */
   909	SYM_FUNC_START(entry_SYSENTER_32)
   910		/*
   911		 * On entry-stack with all userspace-regs live - save and
   912		 * restore eflags and %eax to use it as scratch-reg for the cr3
   913		 * switch.
   914		 */
   915		pushfl
   916		pushl	%eax
   917		BUG_IF_WRONG_CR3 no_user_check=1
   918		SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
   919		popl	%eax
   920		popfl
   921	
   922		/* Stack empty again, switch to task stack */
   923		movl	TSS_entry2task_stack(%esp), %esp
   924	
   925	.Lsysenter_past_esp:
   926		pushl	$__USER_DS		/* pt_regs->ss */
   927		pushl	%ebp			/* pt_regs->sp (stashed in bp) */
   928		pushfl				/* pt_regs->flags (except IF = 0) */
   929		orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
 > 930		pushl	$(__USER_CS | CSH_ENTRY_SYSENTER)	/* pt_regs->cs */
   931		pushl	$0			/* pt_regs->ip = 0 (placeholder) */
   932		pushl	%eax			/* pt_regs->orig_ax */
   933		SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */
   934	
   935		/*
   936		 * SYSENTER doesn't filter flags, so we need to clear NT, AC
   937		 * and TF ourselves.  To save a few cycles, we can check whether
   938		 * either was set instead of doing an unconditional popfq.
   939		 * This needs to happen before enabling interrupts so that
   940		 * we don't get preempted with NT set.
   941		 *
   942		 * If TF is set, we will single-step all the way to here -- do_debug
   943		 * will ignore all the traps.  (Yes, this is slow, but so is
   944		 * single-stepping in general.  This allows us to avoid having
   945		 * a more complicated code to handle the case where a user program
   946		 * forces us to single-step through the SYSENTER entry code.)
   947		 *
   948		 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
   949		 * out-of-line as an optimization: NT is unlikely to be set in the
   950		 * majority of the cases and instead of polluting the I$ unnecessarily,
   951		 * we're keeping that code behind a branch which will predict as
   952		 * not-taken and therefore its instructions won't be fetched.
   953		 */
   954		testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
   955		jnz	.Lsysenter_fix_flags
   956	.Lsysenter_flags_fixed:
   957	
   958		movl	%esp, %eax
   959		call	do_fast_syscall_32
   960		/* XEN PV guests always use IRET path */
   961		ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
   962			    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
   963	
   964		STACKLEAK_ERASE
   965	
   966		/* Opportunistic SYSEXIT */
   967	
   968		/*
   969		 * Setup entry stack - we keep the pointer in %eax and do the
   970		 * switch after almost all user-state is restored.
   971		 */
   972	
   973		/* Load entry stack pointer and allocate frame for eflags/eax */
   974		movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
   975		subl	$(2*4), %eax
   976	
   977		/* Copy eflags and eax to entry stack */
   978		movl	PT_EFLAGS(%esp), %edi
   979		movl	PT_EAX(%esp), %esi
   980		movl	%edi, (%eax)
   981		movl	%esi, 4(%eax)
   982	
   983		/* Restore user registers and segments */
   984		movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
   985		movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
   986	1:	mov	PT_FS(%esp), %fs
   987		PTGS_TO_GS
   988	
   989		popl	%ebx			/* pt_regs->bx */
   990		addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
   991		popl	%esi			/* pt_regs->si */
   992		popl	%edi			/* pt_regs->di */
   993		popl	%ebp			/* pt_regs->bp */
   994	
   995		/* Switch to entry stack */
   996		movl	%eax, %esp
   997	
   998		/* Now ready to switch the cr3 */
   999		SWITCH_TO_USER_CR3 scratch_reg=%eax
  1000	
  1001		/*
  1002		 * Restore all flags except IF. (We restore IF separately because
  1003		 * STI gives a one-instruction window in which we won't be interrupted,
  1004		 * whereas POPF does not.)
  1005		 */
  1006		btrl	$X86_EFLAGS_IF_BIT, (%esp)
  1007		BUG_IF_WRONG_CR3 no_user_check=1
  1008		popfl
  1009		popl	%eax
  1010	
  1011		/*
  1012		 * Return back to the vDSO, which will pop ecx and edx.
  1013		 * Don't bother with DS and ES (they already contain __USER_DS).
  1014		 */
  1015		sti
  1016		sysexit
  1017	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all(a)lists.01.org

[-- Attachment #2: config.gz --]
[-- Type: application/gzip, Size: 72930 bytes --]

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2020-06-11 12:35 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-11 12:35 [luto:x86/entry 5/10] arch/x86/entry/entry_32.S:930: Error: invalid operands (*ABS* and *UND* sections) for `|' kernel test robot

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.