From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <linux-kernel-owner@vger.kernel.org>
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S967994AbeE2WRN (ORCPT );
	Tue, 29 May 2018 18:17:13 -0400
Received: from mail-pl0-f68.google.com ([209.85.160.68]:39346 "EHLO
	mail-pl0-f68.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S967914AbeE2WRC (ORCPT );
	Tue, 29 May 2018 18:17:02 -0400
X-Google-Smtp-Source: ADUXVKLhSbAJvU/RWf+PLsjo3nJnhd7JZD+BQ7LBCGNyN/4ghfMU1MN3mzsiV06/KBz/jqOriMLdew==
From: Thomas Garnier <thgarnie@google.com>
To: kernel-hardening@lists.openwall.com
Cc: Thomas Garnier, "Rafael J. Wysocki", Len Brown, Pavel Machek,
	Thomas Gleixner, Ingo Molnar, "H. Peter Anvin", x86@kernel.org,
	linux-pm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v4 09/27] x86/acpi: Adapt assembly for PIE support
Date: Tue, 29 May 2018 15:15:10 -0700
Message-Id: <20180529221625.33541-10-thgarnie@google.com>
X-Mailer: git-send-email 2.17.0.921.gf22659ad46-goog
In-Reply-To: <20180529221625.33541-1-thgarnie@google.com>
References: <20180529221625.33541-1-thgarnie@google.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID: <linux-kernel.vger.kernel.org>
X-Mailing-List: linux-kernel@vger.kernel.org

Change the assembly code to use only relative references to symbols so
that the kernel can be PIE compatible.

Position Independent Executable (PIE) support will allow extending the
KASLR randomization range below 0xffffffff80000000.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Acked-by: Pavel Machek
---
 arch/x86/kernel/acpi/wakeup_64.S | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 50b8ed0317a3..472659c0f811 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -14,7 +14,7 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 ENTRY(wakeup_long64)
-	movq	saved_magic, %rax
+	movq	saved_magic(%rip), %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
 	jne	bogus_64_magic
@@ -25,14 +25,14 @@ ENTRY(wakeup_long64)
 	movw	%ax, %es
 	movw	%ax, %fs
 	movw	%ax, %gs
-	movq	saved_rsp, %rsp
+	movq	saved_rsp(%rip), %rsp
 
-	movq	saved_rbx, %rbx
-	movq	saved_rdi, %rdi
-	movq	saved_rsi, %rsi
-	movq	saved_rbp, %rbp
+	movq	saved_rbx(%rip), %rbx
+	movq	saved_rdi(%rip), %rdi
+	movq	saved_rsi(%rip), %rsi
+	movq	saved_rbp(%rip), %rbp
 
-	movq	saved_rip, %rax
+	movq	saved_rip(%rip), %rax
 	jmp	*%rax
 ENDPROC(wakeup_long64)
 
@@ -45,7 +45,7 @@ ENTRY(do_suspend_lowlevel)
 	xorl	%eax, %eax
 	call	save_processor_state
 
-	movq	$saved_context, %rax
+	leaq	saved_context(%rip), %rax
 	movq	%rsp, pt_regs_sp(%rax)
 	movq	%rbp, pt_regs_bp(%rax)
 	movq	%rsi, pt_regs_si(%rax)
@@ -64,13 +64,14 @@ ENTRY(do_suspend_lowlevel)
 	pushfq
 	popq	pt_regs_flags(%rax)
 
-	movq	$.Lresume_point, saved_rip(%rip)
+	leaq	.Lresume_point(%rip), %rax
+	movq	%rax, saved_rip(%rip)
 
-	movq	%rsp, saved_rsp
-	movq	%rbp, saved_rbp
-	movq	%rbx, saved_rbx
-	movq	%rdi, saved_rdi
-	movq	%rsi, saved_rsi
+	movq	%rsp, saved_rsp(%rip)
+	movq	%rbp, saved_rbp(%rip)
+	movq	%rbx, saved_rbx(%rip)
+	movq	%rdi, saved_rdi(%rip)
+	movq	%rsi, saved_rsi(%rip)
 
 	addq	$8, %rsp
 	movl	$3, %edi
@@ -82,7 +83,7 @@
 	.align 4
 .Lresume_point:
 	/* We don't restore %rax, it must be 0 anyway */
-	movq	$saved_context, %rax
+	leaq	saved_context(%rip), %rax
 	movq	saved_context_cr4(%rax), %rbx
 	movq	%rbx, %cr4
 	movq	saved_context_cr3(%rax), %rbx
-- 
2.17.0.921.gf22659ad46-goog
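
P.S. For readers unfamiliar with the relocation mechanics, a minimal
sketch of why the $sym -> sym(%rip) conversions above are needed. It
assumes the saved_magic and saved_context symbols from wakeup_64.S; the
relocation names are those the GNU assembler emits for x86-64:

	/* Absolute forms: each requires a 32-bit sign-extended absolute
	 * address (R_X86_64_32S), which only resolves if the kernel is
	 * linked within the top 2GB of the address space. A PIE link
	 * rejects this relocation outright.
	 */
	movq	saved_magic, %rax	# absolute disp32 memory operand
	movq	$saved_context, %rax	# absolute address as an immediate

	/* RIP-relative forms: the assembler emits R_X86_64_PC32, an
	 * offset from the next instruction, so the code works at any
	 * load address. The register contents end up the same.
	 */
	movq	saved_magic(%rip), %rax		# load the value
	leaq	saved_context(%rip), %rax	# compute the address

This is also why the patch swaps movq for leaq wherever the old code
loaded an address with a $sym immediate: lea is the natural way to
materialize a RIP-relative address in a register.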