From: Denys Vlasenko
To: Ingo Molnar
Cc: Denys Vlasenko, Linus Torvalds, Steven Rostedt, Borislav Petkov,
	"H. Peter Anvin", Andy Lutomirski, Oleg Nesterov,
	Frederic Weisbecker, Alexei Starovoitov, Will Drewry, Kees Cook,
	x86@kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH 8/9] x86/asm: replace MOVQ $imm,%reg with MOVL
Date: Tue, 31 Mar 2015 19:00:10 +0200
Message-Id: <1427821211-25099-8-git-send-email-dvlasenk@redhat.com>
In-Reply-To: <1427821211-25099-1-git-send-email-dvlasenk@redhat.com>
References: <1427821211-25099-1-git-send-email-dvlasenk@redhat.com>

There is no reason to use MOVQ to load a nonnegative immediate into a
64-bit register: MOVL does the same, since the upper 32 bits are
zero-extended, and its encoding is shorter (no REX.W prefix is needed).

Signed-off-by: Denys Vlasenko
CC: Linus Torvalds
CC: Steven Rostedt
CC: Ingo Molnar
CC: Borislav Petkov
CC: "H. Peter Anvin"
CC: Andy Lutomirski
CC: Oleg Nesterov
CC: Frederic Weisbecker
CC: Alexei Starovoitov
CC: Will Drewry
CC: Kees Cook
CC: x86@kernel.org
CC: linux-kernel@vger.kernel.org
---
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 2 +-
 arch/x86/crypto/twofish-x86_64-asm_64.S   | 4 ++--
 arch/x86/kernel/relocate_kernel_64.S      | 8 ++++----
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 26d49eb..225be06 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -178,7 +178,7 @@ continue_block:
 	## 2a) PROCESS FULL BLOCKS:
 	################################################################
 full_block:
-	movq    $128,%rax
+	movl    $128,%eax
 	lea     128*8*2(block_0), block_1
 	lea     128*8*3(block_0), block_2
 	add     $128*8*1, block_0
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
index a039d21..a350c99 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -264,7 +264,7 @@ ENTRY(twofish_enc_blk)
 	movq	R1, 8(%rsi)
 
 	popq	R1
-	movq	$1,%rax
+	movl	$1,%eax
 	ret
 ENDPROC(twofish_enc_blk)
 
@@ -316,6 +316,6 @@ ENTRY(twofish_dec_blk)
 	movq	R1, 8(%rsi)
 
 	popq	R1
-	movq	$1,%rax
+	movl	$1,%eax
 	ret
 ENDPROC(twofish_dec_blk)
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 04cb179..98111b3 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -123,7 +123,7 @@ identity_mapped:
 	 * Set cr4 to a known state:
 	 *  - physical address extension enabled
 	 */
-	movq	$X86_CR4_PAE, %rax
+	movl	$X86_CR4_PAE, %eax
 	movq	%rax, %cr4
 
 	jmp 1f
@@ -246,17 +246,17 @@ swap_pages:
 	movq	%rsi, %rax
 
 	movq	%r10, %rdi
-	movq	$512, %rcx
+	movl	$512, %ecx
 	rep ; movsq
 
 	movq	%rax, %rdi
 	movq	%rdx, %rsi
-	movq	$512, %rcx
+	movl	$512, %ecx
 	rep ; movsq
 
 	movq	%rdx, %rdi
 	movq	%r10, %rsi
-	movq	$512, %rcx
+	movl	$512, %ecx
 	rep ; movsq
 
 	lea	PAGE_SIZE(%rax), %rsi
-- 
1.8.1.4
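
P.S.: For anyone curious why this substitution is safe, here is a
minimal standalone sketch. The demo.S file name, the _start label and
the exit syscall are scaffolding invented for this note, not anything
from the patch. On x86-64, a write to a 32-bit register architecturally
zero-extends into the upper half of the corresponding 64-bit register,
and dropping the REX.W prefix makes the MOVL form two bytes shorter in
these cases:

	# demo.S - illustrative only
	.text
	.globl _start
_start:
	movq	$128, %rax	# 48 c7 c0 80 00 00 00  (7 bytes)
	movl	$128, %eax	# b8 80 00 00 00        (5 bytes)
	# Both leave %rax == 128: the movl write to %eax clears
	# bits 63:32 of %rax (architectural zero-extension).
	movl	$60, %eax	# __NR_exit on x86-64
	xorl	%edi, %edi	# exit status 0
	syscall

Assemble and disassemble to see the two encodings side by side:

	as demo.S -o demo.o && objdump -d demo.o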