From: Jan Beulich <jbeulich@suse.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Wei Liu" <wl@xen.org>, "Roger Pau Monné" <roger.pau@citrix.com>,
	"Julien Grall" <julien@xen.org>,
	"Stefano Stabellini" <sstabellini@kernel.org>,
	"Volodymyr Babchuk" <volodymyr_babchuk@epam.com>,
	"Bertrand Marquis" <bertrand.marquis@arm.com>,
	"Bobby Eshleman" <bobbyeshleman@gmail.com>,
	"Alistair Francis" <alistair.francis@wdc.com>,
	"Connor Davis" <connojdavis@gmail.com>
Subject: [PATCH v2 1/2] x86: annotate entry points with type and size
Date: Tue, 23 May 2023 13:30:51 +0200	[thread overview]
Message-ID: <fd492a4a-11ba-b63a-daf4-99697db0db0e@suse.com> (raw)
In-Reply-To: <db10bc3d-962e-72a7-b53d-93a7ddd7f3ef@suse.com>

Recent gas versions generate minimalistic DWARF debug info for items
annotated as functions and having their sizes specified [1]. "Borrow"
Arm's END() and (remotely) derive the other annotation infrastructure
from Linux's.
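
To illustrate (using entry_int82, one of the routines converted below),
the conversion pattern is purely mechanical; the comments sketch what
the macros expand to:

    -ENTRY(entry_int82)        # .globl + alignment, but no type/size
    +FUNC(entry_int82)         # adds .type entry_int82, STT_FUNC
             ...
    +END(entry_int82)          # .size entry_int82, . - entry_int82

It is the type and size annotations together which permit gas to emit
the DWARF records mentioned above.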

For switch_to_kernel() and restore_all_guest(), the alignment which so
far was implicit (from their being first in their respective sections)
is made explicit (i.e. FUNC() is used without its 2nd argument), whereas
for {,compat}create_bounce_frame() and autogen_entrypoints[] alignment
is newly arranged for.

Except for the added alignment padding (including its knock-on effects)
there is no change to the generated code/data.
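
As a concrete sketch of the macro mechanics (all taken from the
asm_defns.h additions below), FUNC(entry_int82) expands, via SYM() and
LAST(), to

    .type entry_int82, STT_FUNC
    .globl entry_int82
    .balign (16), 0x90    # default: 16-byte alignment, NOP fill
    entry_int82:

while FUNC(name, 32) would substitute (32) for the default, and the
DATA() flavors default to an alignment of 0 (i.e. none forced) with
0xff fill (hence DATA(autogen_entrypoints, 8) below passes 8
explicitly).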

Signed-off-by: Jan Beulich <jbeulich@suse.com>

[1] https://sourceware.org/git?p=binutils-gdb.git;a=commitdiff;h=591cc9fbbfd6d51131c0f1d4a92e7893edcc7a28
---
v2: Full rework.
---
Only two of the assembly files are being converted for now. More could
be done right here, or in follow-on patches.

In principle the framework should be usable by other architectures as
well. If we want that, the main questions are going to be:
- What header file name? (I don't really like Linux's linkage.h, so I'd
  prefer e.g. asm-defns.h or asm_defns.h as we already have on x86.)
- How much per-arch customization do we want to permit up front (i.e.
  without knowing how much of it is going to be needed)? Initially I'd
  expect only the default function alignment (and padding) to require
  per-arch definitions; a hypothetical sketch follows this list.
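
Purely as a hypothetical sketch (the names here are invented and not
part of this patch), an arch might then only supply its defaults ahead
of the common definitions:

    /* hypothetical per-arch settings */
    #define ARCH_FUNC_ALIGN 16      /* default function alignment */
    #define ARCH_FUNC_FILL  0x90    /* x86: pad with NOPs */

with the common FUNC() using LAST(ARCH_FUNC_ALIGN, ## algn) and
ARCH_FUNC_FILL in place of the literal 16 and 0x90 used below.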

Note that the FB-label in autogen_stubs() cannot be converted just yet:
such labels cannot be used with .type. We could further diverge from
Linux's model and avoid setting STT_NOTYPE explicitly (that's the type
labels get by default anyway).
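
One possible shape of that divergence (a sketch only, not what this
patch does): with the .type use dropped, e.g.

    #define LABEL_LOCAL(name, algn...)          \
            SYM_ALIGN(LAST(16, ## algn), 0x90); \
            name:

labels would simply retain their default STT_NOTYPE, and FB-labels
would no longer be excluded on account of .type.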

Note that we can't use ALIGN() (in place of SYM_ALIGN()) as long as we
still have ALIGN.
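
For reference, the default-selection machinery behind FUNC() et al
works as follows (count_args() comes from xen/lib.h, hence the new
#include in both converted files):

    LAST(16)    -> LAST_(count_args(16))(16)       -> ARG1_(16)    -> (16)
    LAST(16, 8) -> LAST_(count_args(16, 8))(16, 8) -> ARG2_(16, 8) -> (8)

i.e. the last argument supplied wins: FUNC(name) gets the default
alignment of 16, while FUNC(name, 8) overrides it.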

--- a/xen/arch/x86/include/asm/asm_defns.h
+++ b/xen/arch/x86/include/asm/asm_defns.h
@@ -81,6 +81,45 @@ register unsigned long current_stack_poi
 
 #ifdef __ASSEMBLY__
 
+#define SYM_ALIGN(algn...) .balign algn
+
+#define SYM_L_GLOBAL(name) .globl name
+#define SYM_L_WEAK(name)   .weak name
+#define SYM_L_LOCAL(name)  /* nothing */
+
+#define SYM_T_FUNC         STT_FUNC
+#define SYM_T_DATA         STT_OBJECT
+#define SYM_T_NONE         STT_NOTYPE
+
+#define SYM(name, typ, linkage, algn...)          \
+        .type name, SYM_T_ ## typ;                \
+        SYM_L_ ## linkage(name);                  \
+        SYM_ALIGN(algn);                          \
+        name:
+
+#define END(name) .size name, . - name
+
+#define ARG1_(x, y...) (x)
+#define ARG2_(x, y...) ARG1_(y)
+
+#define LAST__(nr) ARG ## nr ## _
+#define LAST_(nr)  LAST__(nr)
+#define LAST(x, y...) LAST_(count_args(x, ## y))(x, ## y)
+
+#define FUNC(name, algn...) \
+        SYM(name, FUNC, GLOBAL, LAST(16, ## algn), 0x90)
+#define LABEL(name, algn...) \
+        SYM(name, NONE, GLOBAL, LAST(16, ## algn), 0x90)
+#define DATA(name, algn...) \
+        SYM(name, DATA, GLOBAL, LAST(0, ## algn), 0xff)
+
+#define FUNC_LOCAL(name, algn...) \
+        SYM(name, FUNC, LOCAL, LAST(16, ## algn), 0x90)
+#define LABEL_LOCAL(name, algn...) \
+        SYM(name, NONE, LOCAL, LAST(16, ## algn), 0x90)
+#define DATA_LOCAL(name, algn...) \
+        SYM(name, DATA, LOCAL, LAST(0, ## algn), 0xff)
+
 #ifdef HAVE_AS_QUOTED_SYM
 #define SUBSECTION_LBL(tag)                        \
         .ifndef .L.tag;                            \
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -8,10 +8,11 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
+#include <xen/lib.h>
 #include <public/xen.h>
 #include <irq_vectors.h>
 
-ENTRY(entry_int82)
+FUNC(entry_int82)
         ENDBR64
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $0
@@ -27,9 +28,10 @@ ENTRY(entry_int82)
 
         mov   %rsp, %rdi
         call  do_entry_int82
+END(entry_int82)
 
 /* %rbx: struct vcpu */
-ENTRY(compat_test_all_events)
+FUNC(compat_test_all_events)
         ASSERT_NOT_IN_ATOMIC
         cli                             # tests must not race interrupts
 /*compat_test_softirqs:*/
@@ -66,24 +68,21 @@ compat_test_guest_events:
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-compat_process_softirqs:
+LABEL_LOCAL(compat_process_softirqs)
         sti
         call  do_softirq
         jmp   compat_test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu, %rdx: struct trap_bounce */
-.Lcompat_process_trapbounce:
+LABEL_LOCAL(.Lcompat_process_trapbounce)
         sti
 .Lcompat_bounce_exception:
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
 
-	ALIGN
 /* %rbx: struct vcpu */
-compat_process_mce:
+LABEL_LOCAL(compat_process_mce)
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
         jnz   .Lcompat_test_guest_nmi
         sti
@@ -97,9 +96,8 @@ compat_process_mce:
         movb %dl,VCPU_async_exception_mask(%rbx)
         jmp   compat_process_trap
 
-	ALIGN
 /* %rbx: struct vcpu */
-compat_process_nmi:
+LABEL_LOCAL(compat_process_nmi)
         testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
         jnz   compat_test_guest_events
         sti
@@ -116,9 +114,10 @@ compat_process_trap:
         leaq  VCPU_trap_bounce(%rbx),%rdx
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_test_all_events)
 
 /* %rbx: struct vcpu, interrupts disabled */
-ENTRY(compat_restore_all_guest)
+FUNC(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
         mov   $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11d
         and   UREGS_eflags(%rsp),%r11d
@@ -161,9 +160,10 @@ ENTRY(compat_restore_all_guest)
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+END(compat_restore_all_guest)
 
 /* This mustn't modify registers other than %rax. */
-ENTRY(cr4_pv32_restore)
+FUNC(cr4_pv32_restore)
         push  %rdx
         GET_CPUINFO_FIELD(cr4, dx)
         mov   (%rdx), %rax
@@ -193,8 +193,9 @@ ENTRY(cr4_pv32_restore)
         pop   %rdx
         xor   %eax, %eax
         ret
+END(cr4_pv32_restore)
 
-ENTRY(compat_syscall)
+FUNC(compat_syscall)
         /* Fix up reported %cs/%ss for compat domains. */
         movl  $FLAT_COMPAT_USER_SS, UREGS_ss(%rsp)
         movl  $FLAT_COMPAT_USER_CS, UREGS_cs(%rsp)
@@ -222,8 +223,9 @@ UNLIKELY_END(compat_syscall_gpf)
         movw  %si,TRAPBOUNCE_cs(%rdx)
         movb  %cl,TRAPBOUNCE_flags(%rdx)
         jmp   .Lcompat_bounce_exception
+END(compat_syscall)
 
-ENTRY(compat_sysenter)
+FUNC(compat_sysenter)
         CR4_PV32_RESTORE
         movq  VCPU_trap_ctxt(%rbx),%rcx
         cmpb  $X86_EXC_GP, UREGS_entry_vector(%rsp)
@@ -236,17 +238,19 @@ ENTRY(compat_sysenter)
         movw  %ax,TRAPBOUNCE_cs(%rdx)
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_sysenter)
 
-ENTRY(compat_int80_direct_trap)
+FUNC(compat_int80_direct_trap)
         CR4_PV32_RESTORE
         call  compat_create_bounce_frame
         jmp   compat_test_all_events
+END(compat_int80_direct_trap)
 
 /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
 /*   {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]}                             */
 /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
 /* On return only %rbx and %rdx are guaranteed non-clobbered.            */
-compat_create_bounce_frame:
+FUNC_LOCAL(compat_create_bounce_frame)
         ASSERT_INTERRUPTS_ENABLED
         mov   %fs,%edi
         ALTERNATIVE "", stac, X86_FEATURE_XEN_SMAP
@@ -352,3 +356,4 @@ compat_crash_page_fault:
         jmp   .Lft14
 .previous
         _ASM_EXTABLE(.Lft14, .Lfx14)
+END(compat_create_bounce_frame)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -9,6 +9,7 @@
 #include <asm/asm_defns.h>
 #include <asm/page.h>
 #include <asm/processor.h>
+#include <xen/lib.h>
 #include <public/xen.h>
 #include <irq_vectors.h>
 
@@ -24,7 +25,7 @@
 
 #ifdef CONFIG_PV
 /* %rbx: struct vcpu */
-switch_to_kernel:
+FUNC_LOCAL(switch_to_kernel)
         leaq  VCPU_trap_bounce(%rbx),%rdx
 
         /* TB_eip = 32-bit syscall ? syscall32_addr : syscall_addr */
@@ -89,24 +90,21 @@ test_guest_events:
         call  create_bounce_frame
         jmp   test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_softirqs:
+LABEL_LOCAL(process_softirqs)
         sti
         call do_softirq
         jmp  test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu, %rdx struct trap_bounce */
-.Lprocess_trapbounce:
+LABEL_LOCAL(.Lprocess_trapbounce)
         sti
 .Lbounce_exception:
         call  create_bounce_frame
         jmp   test_all_events
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_mce:
+LABEL_LOCAL(process_mce)
         testb $1 << VCPU_TRAP_MCE, VCPU_async_exception_mask(%rbx)
         jnz  .Ltest_guest_nmi
         sti
@@ -120,9 +118,8 @@ process_mce:
         movb %dl, VCPU_async_exception_mask(%rbx)
         jmp  process_trap
 
-        ALIGN
 /* %rbx: struct vcpu */
-process_nmi:
+LABEL_LOCAL(process_nmi)
         testb $1 << VCPU_TRAP_NMI, VCPU_async_exception_mask(%rbx)
         jnz  test_guest_events
         sti
@@ -139,11 +136,12 @@ process_trap:
         leaq VCPU_trap_bounce(%rbx), %rdx
         call create_bounce_frame
         jmp  test_all_events
+END(switch_to_kernel)
 
         .section .text.entry, "ax", @progbits
 
 /* %rbx: struct vcpu, interrupts disabled */
-restore_all_guest:
+FUNC_LOCAL(restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
 
         /* Stash guest SPEC_CTRL value while we can read struct vcpu. */
@@ -220,8 +218,7 @@ restore_all_guest:
         sysretq
 1:      sysretl
 
-        ALIGN
-.Lrestore_rcx_iret_exit_to_guest:
+LABEL_LOCAL(.Lrestore_rcx_iret_exit_to_guest)
         movq  8(%rsp), %rcx           # RIP
 /* No special register assumptions. */
 iret_exit_to_guest:
@@ -230,6 +227,7 @@ iret_exit_to_guest:
         addq  $8,%rsp
 .Lft0:  iretq
         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+END(restore_all_guest)
 
 /*
  * When entering SYSCALL from kernel mode:
@@ -246,7 +244,7 @@ iret_exit_to_guest:
  *  - Guest %rsp stored in %rax
  *  - Xen stack loaded, pointing at the %ss slot
  */
-ENTRY(lstar_enter)
+FUNC(lstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
@@ -281,9 +279,10 @@ ENTRY(lstar_enter)
         mov   %rsp, %rdi
         call  pv_hypercall
         jmp   test_all_events
+END(lstar_enter)
 
 /* See lstar_enter for entry register state. */
-ENTRY(cstar_enter)
+FUNC(cstar_enter)
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
 #endif
@@ -321,8 +320,9 @@ ENTRY(cstar_enter)
         jne   compat_syscall
 #endif
         jmp   switch_to_kernel
+END(cstar_enter)
 
-ENTRY(sysenter_entry)
+FUNC(sysenter_entry)
         ENDBR64
 #ifdef CONFIG_XEN_SHSTK
         ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
@@ -331,7 +331,7 @@ ENTRY(sysenter_entry)
         pushq $FLAT_USER_SS
         pushq $0
         pushfq
-GLOBAL(sysenter_eflags_saved)
+LABEL(sysenter_eflags_saved, 0)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $3 /* ring 3 null cs */
         pushq $0 /* null rip */
@@ -385,8 +385,9 @@ UNLIKELY_END(sysenter_gpf)
         jne   compat_sysenter
 #endif
         jmp   .Lbounce_exception
+END(sysenter_entry)
 
-ENTRY(int80_direct_trap)
+FUNC(int80_direct_trap)
         ENDBR64
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         pushq $0
@@ -474,6 +475,7 @@ int80_slow_path:
          */
         GET_STACK_END(14)
         jmp   handle_exception_saved
+END(int80_direct_trap)
 
         /* create_bounce_frame & helpers don't need to be in .text.entry */
         .text
@@ -482,7 +484,7 @@ int80_slow_path:
 /*   { RCX, R11, [ERRCODE,] RIP, CS, RFLAGS, RSP, SS }                   */
 /* %rdx: trap_bounce, %rbx: struct vcpu                                  */
 /* On return only %rbx and %rdx are guaranteed non-clobbered.            */
-create_bounce_frame:
+FUNC_LOCAL(create_bounce_frame)
         ASSERT_INTERRUPTS_ENABLED
         testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
         jnz   1f
@@ -618,6 +620,7 @@ ENTRY(dom_crash_sync_extable)
         xorl  %edi,%edi
         jmp   asm_domain_crash_synchronous /* Does not return */
         .popsection
+END(create_bounce_frame)
 #endif /* CONFIG_PV */
 
 /* --- CODE BELOW THIS LINE (MOSTLY) NOT GUEST RELATED --- */
@@ -626,7 +629,7 @@ ENTRY(dom_crash_sync_extable)
 
 /* No special register assumptions. */
 #ifdef CONFIG_PV
-ENTRY(continue_pv_domain)
+FUNC(continue_pv_domain)
         ENDBR64
         call  check_wakeup_from_wait
 ret_from_intr:
@@ -641,26 +644,28 @@ ret_from_intr:
 #else
         jmp   test_all_events
 #endif
+END(continue_pv_domain)
 #else
-ret_from_intr:
+FUNC(ret_from_intr, 0)
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
+END(ret_from_intr)
 #endif
 
         .section .init.text, "ax", @progbits
-ENTRY(early_page_fault)
+FUNC(early_page_fault)
         ENDBR64
         movl  $X86_EXC_PF, 4(%rsp)
         SAVE_ALL
         movq  %rsp, %rdi
         call  do_early_page_fault
         jmp   restore_all_xen
+END(early_page_fault)
 
         .section .text.entry, "ax", @progbits
 
-        ALIGN
 /* No special register assumptions. */
-restore_all_xen:
+FUNC_LOCAL(restore_all_xen)
         /*
          * Check whether we need to switch to the per-CPU page tables, in
          * case we return to late PV exit code (from an NMI or #MC).
@@ -677,8 +682,9 @@ UNLIKELY_END(exit_cr3)
 
         RESTORE_ALL adj=8
         iretq
+END(restore_all_xen)
 
-ENTRY(common_interrupt)
+FUNC(common_interrupt)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL
 
@@ -707,12 +713,14 @@ ENTRY(common_interrupt)
         mov   %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
         mov   %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
         jmp ret_from_intr
+END(common_interrupt)
 
-ENTRY(page_fault)
+FUNC(page_fault)
         ENDBR64
         movl  $X86_EXC_PF, 4(%rsp)
+END(page_fault)
 /* No special register assumptions. */
-GLOBAL(handle_exception)
+FUNC(handle_exception, 0)
         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
         SAVE_ALL
 
@@ -882,92 +890,108 @@ FATAL_exception_with_ints_disabled:
         movq  %rsp,%rdi
         call  fatal_trap
         BUG   /* fatal_trap() shouldn't return. */
+END(handle_exception)
 
-ENTRY(divide_error)
+FUNC(divide_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_DE, 4(%rsp)
         jmp   handle_exception
+END(divide_error)
 
-ENTRY(coprocessor_error)
+FUNC(coprocessor_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_MF, 4(%rsp)
         jmp   handle_exception
+END(coprocessor_error)
 
-ENTRY(simd_coprocessor_error)
+FUNC(simd_coprocessor_error)
         ENDBR64
         pushq $0
         movl  $X86_EXC_XM, 4(%rsp)
         jmp   handle_exception
+END(simd_coprocessor_error)
 
-ENTRY(device_not_available)
+FUNC(device_not_available)
         ENDBR64
         pushq $0
         movl  $X86_EXC_NM, 4(%rsp)
         jmp   handle_exception
+END(device_not_available)
 
-ENTRY(debug)
+FUNC(debug)
         ENDBR64
         pushq $0
         movl  $X86_EXC_DB, 4(%rsp)
         jmp   handle_ist_exception
+END(debug)
 
-ENTRY(int3)
+FUNC(int3)
         ENDBR64
         pushq $0
         movl  $X86_EXC_BP, 4(%rsp)
         jmp   handle_exception
+END(int3)
 
-ENTRY(overflow)
+FUNC(overflow)
         ENDBR64
         pushq $0
         movl  $X86_EXC_OF, 4(%rsp)
         jmp   handle_exception
+END(overflow)
 
-ENTRY(bounds)
+FUNC(bounds)
         ENDBR64
         pushq $0
         movl  $X86_EXC_BR, 4(%rsp)
         jmp   handle_exception
+END(bounds)
 
-ENTRY(invalid_op)
+FUNC(invalid_op)
         ENDBR64
         pushq $0
         movl  $X86_EXC_UD, 4(%rsp)
         jmp   handle_exception
+END(invalid_op)
 
-ENTRY(invalid_TSS)
+FUNC(invalid_TSS)
         ENDBR64
         movl  $X86_EXC_TS, 4(%rsp)
         jmp   handle_exception
+END(invalid_TSS)
 
-ENTRY(segment_not_present)
+FUNC(segment_not_present)
         ENDBR64
         movl  $X86_EXC_NP, 4(%rsp)
         jmp   handle_exception
+END(segment_not_present)
 
-ENTRY(stack_segment)
+FUNC(stack_segment)
         ENDBR64
         movl  $X86_EXC_SS, 4(%rsp)
         jmp   handle_exception
+END(stack_segment)
 
-ENTRY(general_protection)
+FUNC(general_protection)
         ENDBR64
         movl  $X86_EXC_GP, 4(%rsp)
         jmp   handle_exception
+END(general_protection)
 
-ENTRY(alignment_check)
+FUNC(alignment_check)
         ENDBR64
         movl  $X86_EXC_AC, 4(%rsp)
         jmp   handle_exception
+END(alignment_check)
 
-ENTRY(entry_CP)
+FUNC(entry_CP)
         ENDBR64
         movl  $X86_EXC_CP, 4(%rsp)
         jmp   handle_exception
+END(entry_CP)
 
-ENTRY(double_fault)
+FUNC(double_fault)
         ENDBR64
         movl  $X86_EXC_DF, 4(%rsp)
         /* Set AC to reduce chance of further SMAP faults */
@@ -991,8 +1015,9 @@ ENTRY(double_fault)
         movq  %rsp,%rdi
         call  do_double_fault
         BUG   /* do_double_fault() shouldn't return. */
+END(double_fault)
 
-ENTRY(nmi)
+FUNC(nmi)
         ENDBR64
         pushq $0
         movl  $X86_EXC_NMI, 4(%rsp)
@@ -1120,21 +1145,24 @@ handle_ist_exception:
         ASSERT_CONTEXT_IS_XEN
         jmp   restore_all_xen
 #endif
+END(nmi)
 
-ENTRY(machine_check)
+FUNC(machine_check)
         ENDBR64
         pushq $0
         movl  $X86_EXC_MC, 4(%rsp)
         jmp   handle_ist_exception
+END(machine_check)
 
 /* No op trap handler.  Required for kexec crash path. */
-GLOBAL(trap_nop)
+FUNC(trap_nop, 0)
         ENDBR64
         iretq
+END(trap_nop)
 
 /* Table of automatically generated entry points.  One per vector. */
         .pushsection .init.rodata, "a", @progbits
-GLOBAL(autogen_entrypoints)
+DATA(autogen_entrypoints, 8)
         /* pop into the .init.rodata section and record an entry point. */
         .macro entrypoint ent
         .pushsection .init.rodata, "a", @progbits
@@ -1143,7 +1171,7 @@ GLOBAL(autogen_entrypoints)
         .endm
 
         .popsection
-autogen_stubs: /* Automatically generated stubs. */
+FUNC_LOCAL(autogen_stubs, 0) /* Automatically generated stubs. */
 
         vec = 0
         .rept X86_NR_VECTORS
@@ -1187,6 +1215,7 @@ autogen_stubs: /* Automatically generate
 
         vec = vec + 1
         .endr
+END(autogen_stubs)
 
         .section .init.rodata, "a", @progbits
-        .size autogen_entrypoints, . - autogen_entrypoints
+END(autogen_entrypoints)


