* [PATCH 0/2] arm64: Finish up assembler annotation modernisation
From: Mark Brown @ 2020-04-28 16:43 UTC
To: Will Deacon, Catalin Marinas; +Cc: Mark Brown, linux-arm-kernel
This series finishes up the conversion of arm64 to use modern assembler
annotations. The second patch selects a symbol added by commit
2ce0d7f9766f0e49b ("x86/asm: Provide a Kconfig symbol for disabling old
assembly annotations") in -next to ensure that we don't regress and start
using the old annotations again.

This will be needed for BTI kernel support, so it would be good if these
patches could end up on that branch, either through merging a topic branch
or otherwise. The patches are generated against a merge of v5.7-rc3 and
for-next/bti in the arm64 tree.
Mark Brown (2):
arm64: kernel: Convert to modern annotations for assembly functions
arm64: Disable old style assembly annotations
arch/arm64/Kconfig | 1 +
arch/arm64/kernel/cpu-reset.S | 4 +-
arch/arm64/kernel/efi-rt-wrapper.S | 4 +-
arch/arm64/kernel/entry-fpsimd.S | 20 ++++-----
arch/arm64/kernel/entry.S | 7 +--
arch/arm64/kernel/hibernate-asm.S | 16 +++----
arch/arm64/kernel/hyp-stub.S | 20 ++++-----
arch/arm64/kernel/probes/kprobes_trampoline.S | 4 +-
arch/arm64/kernel/reloc_test_syms.S | 44 +++++++++----------
arch/arm64/kernel/relocate_kernel.S | 4 +-
arch/arm64/kernel/sleep.S | 12 ++---
arch/arm64/kernel/smccc-call.S | 8 ++--
12 files changed, 73 insertions(+), 71 deletions(-)
--
2.20.1
* [PATCH 1/2] arm64: kernel: Convert to modern annotations for assembly functions
From: Mark Brown @ 2020-04-28 16:43 UTC
To: Will Deacon, Catalin Marinas; +Cc: Mark Brown, linux-arm-kernel
In an effort to clarify and simplify the annotation of assembly functions
in the kernel, new macros have been introduced. These replace ENTRY and
ENDPROC, and also add a new annotation for static functions, which
previously had no ENTRY equivalent. Update the annotations in the core
kernel code to the new macros.
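For reference, a minimal before/after sketch using a hypothetical function
(illustrative only, not taken from this series):

    /* Old style: */
    ENTRY(example_func)
    	mov	x0, #0
    	ret
    ENDPROC(example_func)

    /* New style: */
    SYM_FUNC_START(example_func)
    	mov	x0, #0
    	ret
    SYM_FUNC_END(example_func)

    /* New local (file-static) variant, for which ENTRY had no equivalent: */
    SYM_FUNC_START_LOCAL(example_helper)
    	ret
    SYM_FUNC_END(example_helper)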
Signed-off-by: Mark Brown <broonie@kernel.org>
---
arch/arm64/kernel/cpu-reset.S | 4 +-
arch/arm64/kernel/efi-rt-wrapper.S | 4 +-
arch/arm64/kernel/entry-fpsimd.S | 20 ++++-----
arch/arm64/kernel/entry.S | 7 +--
arch/arm64/kernel/hibernate-asm.S | 16 +++----
arch/arm64/kernel/hyp-stub.S | 20 ++++-----
arch/arm64/kernel/probes/kprobes_trampoline.S | 4 +-
arch/arm64/kernel/reloc_test_syms.S | 44 +++++++++----------
arch/arm64/kernel/relocate_kernel.S | 4 +-
arch/arm64/kernel/sleep.S | 12 ++---
arch/arm64/kernel/smccc-call.S | 8 ++--
11 files changed, 72 insertions(+), 71 deletions(-)
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 38087b4c0432..646103743a50 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -29,7 +29,7 @@
* branch to what would be the reset vector. It must be executed with the
* flat identity mapping.
*/
-ENTRY(__cpu_soft_restart)
+SYM_FUNC_START(__cpu_soft_restart)
/* Clear sctlr_el1 flags. */
mrs x12, sctlr_el1
mov_q x13, SCTLR_ELx_FLAGS
@@ -47,6 +47,6 @@ ENTRY(__cpu_soft_restart)
mov x1, x3 // arg1
mov x2, x4 // arg2
br x8
-ENDPROC(__cpu_soft_restart)
+SYM_FUNC_END(__cpu_soft_restart)
.popsection
diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
index 3fc71106cb2b..1192c4bb48df 100644
--- a/arch/arm64/kernel/efi-rt-wrapper.S
+++ b/arch/arm64/kernel/efi-rt-wrapper.S
@@ -5,7 +5,7 @@
#include <linux/linkage.h>
-ENTRY(__efi_rt_asm_wrapper)
+SYM_FUNC_START(__efi_rt_asm_wrapper)
stp x29, x30, [sp, #-32]!
mov x29, sp
@@ -35,4 +35,4 @@ ENTRY(__efi_rt_asm_wrapper)
b.ne 0f
ret
0: b efi_handle_corrupted_x18 // tail call
-ENDPROC(__efi_rt_asm_wrapper)
+SYM_FUNC_END(__efi_rt_asm_wrapper)
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 0f24eae8f3cc..f880dd63ddc3 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -16,34 +16,34 @@
*
* x0 - pointer to struct fpsimd_state
*/
-ENTRY(fpsimd_save_state)
+SYM_FUNC_START(fpsimd_save_state)
fpsimd_save x0, 8
ret
-ENDPROC(fpsimd_save_state)
+SYM_FUNC_END(fpsimd_save_state)
/*
* Load the FP registers.
*
* x0 - pointer to struct fpsimd_state
*/
-ENTRY(fpsimd_load_state)
+SYM_FUNC_START(fpsimd_load_state)
fpsimd_restore x0, 8
ret
-ENDPROC(fpsimd_load_state)
+SYM_FUNC_END(fpsimd_load_state)
#ifdef CONFIG_ARM64_SVE
-ENTRY(sve_save_state)
+SYM_FUNC_START(sve_save_state)
sve_save 0, x1, 2
ret
-ENDPROC(sve_save_state)
+SYM_FUNC_END(sve_save_state)
-ENTRY(sve_load_state)
+SYM_FUNC_START(sve_load_state)
sve_load 0, x1, x2, 3, x4
ret
-ENDPROC(sve_load_state)
+SYM_FUNC_END(sve_load_state)
-ENTRY(sve_get_vl)
+SYM_FUNC_START(sve_get_vl)
_sve_rdvl 0, 1
ret
-ENDPROC(sve_get_vl)
+SYM_FUNC_END(sve_get_vl)
#endif /* CONFIG_ARM64_SVE */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ddcde093c433..664a833aa619 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -730,7 +730,7 @@ SYM_CODE_END(el0_error)
/*
* Ok, we need to do extra processing, enter the slow path.
*/
-work_pending:
+SYM_CODE_START_LOCAL(work_pending)
mov x0, sp // 'regs'
bl do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -738,10 +738,11 @@ work_pending:
#endif
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
b finish_ret_to_user
+SYM_CODE_END(work_pending)
/*
* "slow" syscall return path.
*/
-ret_to_user:
+SYM_CODE_START_LOCAL(ret_to_user)
disable_daif
gic_prio_kentry_setup tmp=x3
ldr x1, [tsk, #TSK_TI_FLAGS]
@@ -753,7 +754,7 @@ finish_ret_to_user:
bl stackleak_erase
#endif
kernel_exit 0
-ENDPROC(ret_to_user)
+SYM_CODE_END(ret_to_user)
.popsection // .entry.text
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 6532105b3e32..8ccca660034e 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -65,7 +65,7 @@
* x5: physical address of a zero page that remains zero after resume
*/
.pushsection ".hibernate_exit.text", "ax"
-ENTRY(swsusp_arch_suspend_exit)
+SYM_CODE_START(swsusp_arch_suspend_exit)
/*
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
@@ -110,7 +110,7 @@ ENTRY(swsusp_arch_suspend_exit)
cbz x24, 3f /* Do we need to re-initialise EL2? */
hvc #0
3: ret
-ENDPROC(swsusp_arch_suspend_exit)
+SYM_CODE_END(swsusp_arch_suspend_exit)
/*
* Restore the hyp stub.
@@ -119,15 +119,15 @@ ENDPROC(swsusp_arch_suspend_exit)
*
* x24: The physical address of __hyp_stub_vectors
*/
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
msr vbar_el2, x24
eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
.macro invalid_vector label
-\label:
+SYM_CODE_START_LOCAL(\label)
b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
.endm
invalid_vector el2_sync_invalid
@@ -141,7 +141,7 @@ ENDPROC(\label)
/* el2 vectors - switch el2 here while we restore the memory image. */
.align 11
-ENTRY(hibernate_el2_vectors)
+SYM_CODE_START(hibernate_el2_vectors)
ventry el2_sync_invalid // Synchronous EL2t
ventry el2_irq_invalid // IRQ EL2t
ventry el2_fiq_invalid // FIQ EL2t
@@ -161,6 +161,6 @@ ENTRY(hibernate_el2_vectors)
ventry el1_irq_invalid // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1
ventry el1_error_invalid // Error 32-bit EL1
-END(hibernate_el2_vectors)
+SYM_CODE_END(hibernate_el2_vectors)
.popsection
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e473ead806ed..160f5881a0b7 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -21,7 +21,7 @@
.align 11
-ENTRY(__hyp_stub_vectors)
+SYM_CODE_START(__hyp_stub_vectors)
ventry el2_sync_invalid // Synchronous EL2t
ventry el2_irq_invalid // IRQ EL2t
ventry el2_fiq_invalid // FIQ EL2t
@@ -41,11 +41,11 @@ ENTRY(__hyp_stub_vectors)
ventry el1_irq_invalid // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1
ventry el1_error_invalid // Error 32-bit EL1
-ENDPROC(__hyp_stub_vectors)
+SYM_CODE_END(__hyp_stub_vectors)
.align 11
-el1_sync:
+SYM_CODE_START_LOCAL(el1_sync)
cmp x0, #HVC_SET_VECTORS
b.ne 2f
msr vbar_el2, x1
@@ -68,12 +68,12 @@ el1_sync:
9: mov x0, xzr
eret
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
.macro invalid_vector label
-\label:
+SYM_CODE_START_LOCAL(\label)
b \label
-ENDPROC(\label)
+SYM_CODE_END(\label)
.endm
invalid_vector el2_sync_invalid
@@ -106,15 +106,15 @@ ENDPROC(\label)
* initialisation entry point.
*/
-ENTRY(__hyp_set_vectors)
+SYM_FUNC_START(__hyp_set_vectors)
mov x1, x0
mov x0, #HVC_SET_VECTORS
hvc #0
ret
-ENDPROC(__hyp_set_vectors)
+SYM_FUNC_END(__hyp_set_vectors)
-ENTRY(__hyp_reset_vectors)
+SYM_FUNC_START(__hyp_reset_vectors)
mov x0, #HVC_RESET_VECTORS
hvc #0
ret
-ENDPROC(__hyp_reset_vectors)
+SYM_FUNC_END(__hyp_reset_vectors)
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index 45dce03aaeaf..890ca72c5a51 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -61,7 +61,7 @@
ldp x28, x29, [sp, #S_X28]
.endm
-ENTRY(kretprobe_trampoline)
+SYM_CODE_START(kretprobe_trampoline)
sub sp, sp, #S_FRAME_SIZE
save_all_base_regs
@@ -79,4 +79,4 @@ ENTRY(kretprobe_trampoline)
add sp, sp, #S_FRAME_SIZE
ret
-ENDPROC(kretprobe_trampoline)
+SYM_CODE_END(kretprobe_trampoline)
diff --git a/arch/arm64/kernel/reloc_test_syms.S b/arch/arm64/kernel/reloc_test_syms.S
index 16a34f188f26..53e8cdfe80e1 100644
--- a/arch/arm64/kernel/reloc_test_syms.S
+++ b/arch/arm64/kernel/reloc_test_syms.S
@@ -5,81 +5,81 @@
#include <linux/linkage.h>
-ENTRY(absolute_data64)
+SYM_CODE_START(absolute_data64)
ldr x0, 0f
ret
0: .quad sym64_abs
-ENDPROC(absolute_data64)
+SYM_CODE_END(absolute_data64)
-ENTRY(absolute_data32)
+SYM_CODE_START(absolute_data32)
ldr w0, 0f
ret
0: .long sym32_abs
-ENDPROC(absolute_data32)
+SYM_CODE_END(absolute_data32)
-ENTRY(absolute_data16)
+SYM_CODE_START(absolute_data16)
adr x0, 0f
ldrh w0, [x0]
ret
0: .short sym16_abs, 0
-ENDPROC(absolute_data16)
+SYM_CODE_END(absolute_data16)
-ENTRY(signed_movw)
+SYM_CODE_START(signed_movw)
movz x0, #:abs_g2_s:sym64_abs
movk x0, #:abs_g1_nc:sym64_abs
movk x0, #:abs_g0_nc:sym64_abs
ret
-ENDPROC(signed_movw)
+SYM_CODE_END(signed_movw)
-ENTRY(unsigned_movw)
+SYM_CODE_START(unsigned_movw)
movz x0, #:abs_g3:sym64_abs
movk x0, #:abs_g2_nc:sym64_abs
movk x0, #:abs_g1_nc:sym64_abs
movk x0, #:abs_g0_nc:sym64_abs
ret
-ENDPROC(unsigned_movw)
+SYM_CODE_END(unsigned_movw)
.align 12
.space 0xff8
-ENTRY(relative_adrp)
+SYM_CODE_START(relative_adrp)
adrp x0, sym64_rel
add x0, x0, #:lo12:sym64_rel
ret
-ENDPROC(relative_adrp)
+SYM_CODE_END(relative_adrp)
.align 12
.space 0xffc
-ENTRY(relative_adrp_far)
+SYM_CODE_START(relative_adrp_far)
adrp x0, memstart_addr
add x0, x0, #:lo12:memstart_addr
ret
-ENDPROC(relative_adrp_far)
+SYM_CODE_END(relative_adrp_far)
-ENTRY(relative_adr)
+SYM_CODE_START(relative_adr)
adr x0, sym64_rel
ret
-ENDPROC(relative_adr)
+SYM_CODE_END(relative_adr)
-ENTRY(relative_data64)
+SYM_CODE_START(relative_data64)
adr x1, 0f
ldr x0, [x1]
add x0, x0, x1
ret
0: .quad sym64_rel - .
-ENDPROC(relative_data64)
+SYM_CODE_END(relative_data64)
-ENTRY(relative_data32)
+SYM_CODE_START(relative_data32)
adr x1, 0f
ldr w0, [x1]
add x0, x0, x1
ret
0: .long sym64_rel - .
-ENDPROC(relative_data32)
+SYM_CODE_END(relative_data32)
-ENTRY(relative_data16)
+SYM_CODE_START(relative_data16)
adr x1, 0f
ldrsh w0, [x1]
add x0, x0, x1
ret
0: .short sym64_rel - ., 0
-ENDPROC(relative_data16)
+SYM_CODE_END(relative_data16)
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index c40ce496c78b..542d6edc6806 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -26,7 +26,7 @@
* control_code_page, a special page which has been set up to be preserved
* during the copy operation.
*/
-ENTRY(arm64_relocate_new_kernel)
+SYM_CODE_START(arm64_relocate_new_kernel)
/* Setup the list loop variables. */
mov x18, x2 /* x18 = dtb address */
@@ -111,7 +111,7 @@ ENTRY(arm64_relocate_new_kernel)
mov x3, xzr
br x17
-ENDPROC(arm64_relocate_new_kernel)
+SYM_CODE_END(arm64_relocate_new_kernel)
.align 3 /* To keep the 64-bit values below naturally aligned. */
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 7b2f2e650c44..70e5e697ebc7 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -62,7 +62,7 @@
*
* x0 = struct sleep_stack_data area
*/
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
@@ -95,10 +95,10 @@ ENTRY(__cpu_suspend_enter)
ldp x29, lr, [sp], #16
mov x0, #1
ret
-ENDPROC(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
-ENTRY(cpu_resume)
+SYM_FUNC_START(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
mov x0, #ARM64_CPU_RUNTIME
bl __cpu_setup
@@ -107,11 +107,11 @@ ENTRY(cpu_resume)
bl __enable_mmu
ldr x8, =_cpu_resume
br x8
-ENDPROC(cpu_resume)
+SYM_FUNC_END(cpu_resume)
.ltorg
.popsection
-ENTRY(_cpu_resume)
+SYM_FUNC_START(_cpu_resume)
mrs x1, mpidr_el1
adr_l x8, mpidr_hash // x8 = struct mpidr_hash virt address
@@ -147,4 +147,4 @@ ENTRY(_cpu_resume)
ldp x29, lr, [x29]
mov x0, #0
ret
-ENDPROC(_cpu_resume)
+SYM_FUNC_END(_cpu_resume)
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index 54655273d1e0..1f93809528a4 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -30,9 +30,9 @@
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
-ENTRY(__arm_smccc_smc)
+SYM_FUNC_START(__arm_smccc_smc)
SMCCC smc
-ENDPROC(__arm_smccc_smc)
+SYM_FUNC_END(__arm_smccc_smc)
EXPORT_SYMBOL(__arm_smccc_smc)
/*
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__arm_smccc_smc)
* unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
* struct arm_smccc_quirk *quirk)
*/
-ENTRY(__arm_smccc_hvc)
+SYM_FUNC_START(__arm_smccc_hvc)
SMCCC hvc
-ENDPROC(__arm_smccc_hvc)
+SYM_FUNC_END(__arm_smccc_hvc)
EXPORT_SYMBOL(__arm_smccc_hvc)
--
2.20.1
* [PATCH 2/2] arm64: Disable old style assembly annotations
From: Mark Brown @ 2020-04-28 16:43 UTC
To: Will Deacon, Catalin Marinas; +Cc: Mark Brown, linux-arm-kernel
Now that we have converted arm64 over to the new-style SYM_ assembler
annotations, select ARCH_USE_SYM_ANNOTATIONS so that the old macros aren't
available and we don't regress.
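For context, the mechanism is roughly as follows (paraphrased from
include/linux/linkage.h, not a verbatim copy): the deprecated macros are
only defined when the architecture has not selected the symbol:

    #ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
    /* Deprecated aliases, unavailable once the arch opts out: */
    #define ENTRY(name)	SYM_FUNC_START(name)
    #define ENDPROC(name)	SYM_FUNC_END(name)
    #endif

so any leftover ENTRY/ENDPROC in arm64 assembly becomes a build failure.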
Signed-off-by: Mark Brown <broonie@kernel.org>
---
arch/arm64/Kconfig | 1 +
1 file changed, 1 insertion(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 43be825d0730..6f199d8146d4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -66,6 +66,7 @@ config ARM64
select ARCH_USE_GNU_PROPERTY
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_USE_SYM_ANNOTATIONS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
--
2.20.1
* Re: [PATCH 1/2] arm64: kernel: Convert to modern annotations for assembly functions
From: Will Deacon @ 2020-04-30 17:18 UTC
To: Mark Brown; +Cc: Catalin Marinas, linux-arm-kernel
On Tue, Apr 28, 2020 at 05:43:30PM +0100, Mark Brown wrote:
> In an effort to clarify and simplify the annotation of assembly functions
> in the kernel, new macros have been introduced. These replace ENTRY and
> ENDPROC, and also add a new annotation for static functions, which
> previously had no ENTRY equivalent. Update the annotations in the core
> kernel code to the new macros.
>
> Signed-off-by: Mark Brown <broonie@kernel.org>
> ---
> arch/arm64/kernel/cpu-reset.S | 4 +-
> arch/arm64/kernel/efi-rt-wrapper.S | 4 +-
> arch/arm64/kernel/entry-fpsimd.S | 20 ++++-----
> arch/arm64/kernel/entry.S | 7 +--
> arch/arm64/kernel/hibernate-asm.S | 16 +++----
> arch/arm64/kernel/hyp-stub.S | 20 ++++-----
> arch/arm64/kernel/probes/kprobes_trampoline.S | 4 +-
> arch/arm64/kernel/reloc_test_syms.S | 44 +++++++++----------
> arch/arm64/kernel/relocate_kernel.S | 4 +-
> arch/arm64/kernel/sleep.S | 12 ++---
> arch/arm64/kernel/smccc-call.S | 8 ++--
> 11 files changed, 72 insertions(+), 71 deletions(-)
>
> diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
> index 38087b4c0432..646103743a50 100644
> --- a/arch/arm64/kernel/cpu-reset.S
> +++ b/arch/arm64/kernel/cpu-reset.S
> @@ -29,7 +29,7 @@
> * branch to what would be the reset vector. It must be executed with the
> * flat identity mapping.
> */
> -ENTRY(__cpu_soft_restart)
> +SYM_FUNC_START(__cpu_soft_restart)
> /* Clear sctlr_el1 flags. */
> mrs x12, sctlr_el1
> mov_q x13, SCTLR_ELx_FLAGS
> @@ -47,6 +47,6 @@ ENTRY(__cpu_soft_restart)
> mov x1, x3 // arg1
> mov x2, x4 // arg2
> br x8
> -ENDPROC(__cpu_soft_restart)
> +SYM_FUNC_END(__cpu_soft_restart)
Hmm, this function is probably weird enough to justify SYM_CODE_* since it
never returns and doesn't have a stack.
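i.e. something along these lines (sketch only, untested):

    SYM_CODE_START(__cpu_soft_restart)
    	/* ... runs on the flat identity map with SCTLR_EL1 flags cleared ... */
    	br	x8		// jumps to the new entry point, never returns
    SYM_CODE_END(__cpu_soft_restart)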
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index ddcde093c433..664a833aa619 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -730,7 +730,7 @@ SYM_CODE_END(el0_error)
> /*
> * Ok, we need to do extra processing, enter the slow path.
> */
> -work_pending:
> +SYM_CODE_START_LOCAL(work_pending)
> mov x0, sp // 'regs'
> bl do_notify_resume
> #ifdef CONFIG_TRACE_IRQFLAGS
> @@ -738,10 +738,11 @@ work_pending:
> #endif
> ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
> b finish_ret_to_user
> +SYM_CODE_END(work_pending)
> /*
> * "slow" syscall return path.
> */
> -ret_to_user:
> +SYM_CODE_START_LOCAL(ret_to_user)
Would this be better off as a SYM_INNER_LABEL inside work_pending? Given
that ret_to_user and work_pending both branch into each other, separating
them doesn't feel quite right.
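i.e. roughly this shape (sketch only, untested):

    SYM_CODE_START_LOCAL(work_pending)
    	mov	x0, sp			// 'regs'
    	bl	do_notify_resume
    	/* ... */
    	b	finish_ret_to_user
    SYM_INNER_LABEL(ret_to_user, SYM_L_LOCAL)
    	disable_daif
    	/* ... */
    finish_ret_to_user:
    	/* ... */
    	kernel_exit 0
    SYM_CODE_END(work_pending)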
> diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
> index 6532105b3e32..8ccca660034e 100644
> --- a/arch/arm64/kernel/hibernate-asm.S
> +++ b/arch/arm64/kernel/hibernate-asm.S
[...]
> .macro invalid_vector label
> -\label:
> +SYM_CODE_START_LOCAL(\label)
> b \label
> -ENDPROC(\label)
> +SYM_CODE_END(\label)
> .endm
[...]
> diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
> index e473ead806ed..160f5881a0b7 100644
> --- a/arch/arm64/kernel/hyp-stub.S
> +++ b/arch/arm64/kernel/hyp-stub.S
[...]
> .macro invalid_vector label
> -\label:
> +SYM_CODE_START_LOCAL(\label)
> b \label
> -ENDPROC(\label)
> +SYM_CODE_END(\label)
> .endm
Huh, this is the exact same macro as the one from the hibernate code. Maybe
we should stick it in asm/assembler.h alongside ventry? Obviously a separate
patch, though.
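Something like this sketch, say (untested):

    /* Shared helper, hypothetically in arch/arm64/include/asm/assembler.h: */
    	.macro	invalid_vector	label
    SYM_CODE_START_LOCAL(\label)
    	b	\label
    SYM_CODE_END(\label)
    	.endm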
> diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
> index 45dce03aaeaf..890ca72c5a51 100644
> --- a/arch/arm64/kernel/probes/kprobes_trampoline.S
> +++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
> @@ -61,7 +61,7 @@
> ldp x28, x29, [sp, #S_X28]
> .endm
>
> -ENTRY(kretprobe_trampoline)
> +SYM_CODE_START(kretprobe_trampoline)
> sub sp, sp, #S_FRAME_SIZE
>
> save_all_base_regs
> @@ -79,4 +79,4 @@ ENTRY(kretprobe_trampoline)
> add sp, sp, #S_FRAME_SIZE
> ret
>
> -ENDPROC(kretprobe_trampoline)
> +SYM_CODE_END(kretprobe_trampoline)
> diff --git a/arch/arm64/kernel/reloc_test_syms.S b/arch/arm64/kernel/reloc_test_syms.S
> index 16a34f188f26..53e8cdfe80e1 100644
> --- a/arch/arm64/kernel/reloc_test_syms.S
> +++ b/arch/arm64/kernel/reloc_test_syms.S
> @@ -5,81 +5,81 @@
>
> #include <linux/linkage.h>
>
> -ENTRY(absolute_data64)
> +SYM_CODE_START(absolute_data64)
> ldr x0, 0f
> ret
> 0: .quad sym64_abs
> -ENDPROC(absolute_data64)
> +SYM_CODE_END(absolute_data64)
Hmm, but all the functions in here *are* actually just called from the C
code in reloc_test_core.c afaict, so they should probably be using
SYM_FUNC_*.
> diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
> index c40ce496c78b..542d6edc6806 100644
> --- a/arch/arm64/kernel/relocate_kernel.S
> +++ b/arch/arm64/kernel/relocate_kernel.S
> @@ -26,7 +26,7 @@
> * control_code_page, a special page which has been set up to be preserved
> * during the copy operation.
> */
> -ENTRY(arm64_relocate_new_kernel)
> +SYM_CODE_START(arm64_relocate_new_kernel)
>
> /* Setup the list loop variables. */
> mov x18, x2 /* x18 = dtb address */
> @@ -111,7 +111,7 @@ ENTRY(arm64_relocate_new_kernel)
> mov x3, xzr
> br x17
>
> -ENDPROC(arm64_relocate_new_kernel)
> +SYM_CODE_END(arm64_relocate_new_kernel)
>
> .align 3 /* To keep the 64-bit values below naturally aligned. */
>
> diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
> index 7b2f2e650c44..70e5e697ebc7 100644
> --- a/arch/arm64/kernel/sleep.S
> +++ b/arch/arm64/kernel/sleep.S
> @@ -62,7 +62,7 @@
> *
> * x0 = struct sleep_stack_data area
> */
> -ENTRY(__cpu_suspend_enter)
> +SYM_FUNC_START(__cpu_suspend_enter)
> stp x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
> stp x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
> stp x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
> @@ -95,10 +95,10 @@ ENTRY(__cpu_suspend_enter)
> ldp x29, lr, [sp], #16
> mov x0, #1
> ret
> -ENDPROC(__cpu_suspend_enter)
> +SYM_FUNC_END(__cpu_suspend_enter)
>
> .pushsection ".idmap.text", "awx"
> -ENTRY(cpu_resume)
> +SYM_FUNC_START(cpu_resume)
> bl el2_setup // if in EL2 drop to EL1 cleanly
> mov x0, #ARM64_CPU_RUNTIME
> bl __cpu_setup
> @@ -107,11 +107,11 @@ ENTRY(cpu_resume)
> bl __enable_mmu
> ldr x8, =_cpu_resume
> br x8
> -ENDPROC(cpu_resume)
> +SYM_FUNC_END(cpu_resume)
SYM_CODE_* here, as I think this is the entry point from the resume
path?
Will
* Re: [PATCH 1/2] arm64: kernel: Convert to modern annotations for assembly functions
From: Mark Brown @ 2020-04-30 18:12 UTC
To: Will Deacon; +Cc: Catalin Marinas, linux-arm-kernel
On Thu, Apr 30, 2020 at 06:18:25PM +0100, Will Deacon wrote:
> On Tue, Apr 28, 2020 at 05:43:30PM +0100, Mark Brown wrote:
> > -work_pending:
> > +SYM_CODE_START_LOCAL(work_pending)
> > mov x0, sp // 'regs'
> > bl do_notify_resume
> > #ifdef CONFIG_TRACE_IRQFLAGS
> > @@ -738,10 +738,11 @@ work_pending:
> > #endif
> > ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
> > b finish_ret_to_user
> > +SYM_CODE_END(work_pending)
> > /*
> > * "slow" syscall return path.
> > */
> > -ret_to_user:
> > +SYM_CODE_START_LOCAL(ret_to_user)
> Would this be better off as a SYM_INNER_LABEL inside work_pending? Given
> that ret_to_user and work_pending both branch into each other, separating
> them doesn't feel quite right.
I remember looking at these when doing the conversion and thinking that
nothing looked quite right due to the cross calls :/ The number of
things that branch to ret_to_user made me think it should really be its
own thing rather than just a label in the middle of another block, but
then work_pending is really a subroutine of ret_to_user that uses a
branch rather than a call, so how do you annotate that?
Possibly we could move work_pending after the kernel_exit in ret_to_user
and make work_pending the SYM_INNER_LABEL, doing things the opposite way
around to what you suggest? I think that's more the intent.
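Roughly this arrangement, that is (sketch only, untested):

    SYM_CODE_START_LOCAL(ret_to_user)
    	disable_daif
    	/* ... */
    	cbnz	x2, work_pending
    finish_ret_to_user:
    	/* ... */
    	kernel_exit 0
    SYM_INNER_LABEL(work_pending, SYM_L_LOCAL)
    	mov	x0, sp			// 'regs'
    	bl	do_notify_resume
    	/* ... */
    	b	finish_ret_to_user
    SYM_CODE_END(ret_to_user)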
> > +SYM_CODE_START_LOCAL(\label)
> > b \label
> > -ENDPROC(\label)
> > +SYM_CODE_END(\label)
> > .endm
> Huh, this is the exact same macro as the one from the hibernate code. Maybe
> we should stick it in asm/assembler.h alongside ventry? Obviously a separate
> patch, though.
I agree.
> > -ENTRY(absolute_data64)
> > +SYM_CODE_START(absolute_data64)
> > ldr x0, 0f
> > ret
> > 0: .quad sym64_abs
> > -ENDPROC(absolute_data64)
> > +SYM_CODE_END(absolute_data64)
> Hmm, but all the functions in here *are* actually just called from the C
> code in reloc_test_core.c afaict, so they should probably be using
> SYM_FUNC_*.
You're right, I think - I remember thinking as I was going through that,
since these were explicitly designed to test relocations, it might be
important that they emit exactly the instructions that are written, but
now that I look again the functions are actually called rather than just
linked, so we need to emit landing pads for them.
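So, for example, something like this (sketch):

    SYM_FUNC_START(absolute_data64)
    	ldr	x0, 0f
    	ret
    0:	.quad	sym64_abs
    SYM_FUNC_END(absolute_data64)

which lets SYM_FUNC_START emit whatever per-function prologue the kernel
needs, such as a BTI landing pad.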
> > -ENTRY(cpu_resume)
> > +SYM_FUNC_START(cpu_resume)
> > bl el2_setup // if in EL2 drop to EL1 cleanly
> > mov x0, #ARM64_CPU_RUNTIME
> > bl __cpu_setup
> > @@ -107,11 +107,11 @@ ENTRY(cpu_resume)
> > bl __enable_mmu
> > ldr x8, =_cpu_resume
> > br x8
> > -ENDPROC(cpu_resume)
> > +SYM_FUNC_END(cpu_resume)
> SYM_CODE_* here, as I think this is the entry point from the resume
> path?
It has a C prototype in asm/suspend.h, and swsusp_arch_suspend_exit in
hibernate-asm.S runs earlier, but now that I look again it jumps here by
issuing a ret rather than via a call, so it's definitely not a normal C
function.
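So SYM_CODE_* does look like the right annotation, something like this
sketch (untested):

    SYM_CODE_START(cpu_resume)
    	bl	el2_setup		// if in EL2 drop to EL1 cleanly
    	/* ... */
    	ldr	x8, =_cpu_resume
    	br	x8
    SYM_CODE_END(cpu_resume)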