x86/boot: Fix kexec booting failure after SEV early boot support

Message ID 20180925111020.23834-1-kasong@redhat.com
State New, archived
Series
  • x86/boot: Fix kexec booting failure after SEV early boot support

Commit Message

Kairui Song Sept. 25, 2018, 11:10 a.m. UTC
Commit 1958b5fc4010 ("x86/boot: Add early boot support when running
with SEV active") sometimes makes kexec unstable: a kexec reboot fails
to start the second kernel, which should bypass the BIOS boot process,
and the system resets instead.

That's because the get_sev_encryption_bit function uses 32-bit
RIP-relative addressing to read the value of enc_bit, but kexec may
load the early boot code at a higher location, beyond the 32-bit
addressing limit. Garbage is read instead, get_sev_encryption_bit
returns the wrong value, and the memory pages end up with the wrong
encryption flag.
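
For illustration, the 32-bit position-independent access in question
looks roughly like this (a minimal sketch mirroring the sequence in
the patch below; the label names are illustrative):

	call	1f			/* pushes the runtime address of 1: */
1:	popl	%edi			/* %edi = runtime address of label 1 */
	subl	$1b, %edi		/* %edi = runtime base - link-time base */
	movl	enc_bit(%edi), %eax	/* link-time address of enc_bit + base */

Every step uses 32-bit registers and arithmetic, so once kexec places
the boot code above the 4 GiB boundary the computed address is
truncated and the load reads garbage.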

Add a get_sev_encryption_bit_64 function to avoid this problem. The
64-bit early boot code uses it instead; it reads enc_bit with native
RIP-relative addressing, which works regardless of where the code is
loaded.

Fixes: 1958b5fc4010 ("x86/boot: Add early boot support when running with SEV active")
Signed-off-by: Kairui Song <kasong@redhat.com>
---
 arch/x86/boot/compressed/mem_encrypt.S | 64 ++++++++++++++++++--------
 1 file changed, 45 insertions(+), 19 deletions(-)

Comments

Tom Lendacky Sept. 25, 2018, 2:33 p.m. UTC | #1
On 09/25/2018 06:10 AM, Kairui Song wrote:
> Commit 1958b5fc4010 ("x86/boot: Add early boot support when running
> with SEV active") sometimes makes kexec unstable: a kexec reboot fails
> to start the second kernel, which should bypass the BIOS boot process,
> and the system resets instead.
> 
> That's because the get_sev_encryption_bit function uses 32-bit
> RIP-relative addressing to read the value of enc_bit, but kexec may
> load the early boot code at a higher location, beyond the 32-bit
> addressing limit. Garbage is read instead, get_sev_encryption_bit
> returns the wrong value, and the memory pages end up with the wrong
> encryption flag.
> 
> Add a get_sev_encryption_bit_64 function to avoid this problem. The
> 64-bit early boot code uses it instead; it reads enc_bit with native
> RIP-relative addressing, which works regardless of where the code is
> loaded.
> 
> Fixes: 1958b5fc4010 ("x86/boot: Add early boot support when running with SEV active")
> Signed-off-by: Kairui Song <kasong@redhat.com>

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>

> ---
>  arch/x86/boot/compressed/mem_encrypt.S | 64 ++++++++++++++++++--------
>  1 file changed, 45 insertions(+), 19 deletions(-)
> 
> diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
> index eaa843a52907..41933550449a 100644
> --- a/arch/x86/boot/compressed/mem_encrypt.S
> +++ b/arch/x86/boot/compressed/mem_encrypt.S
> @@ -18,27 +18,13 @@
>  
>  	.text
>  	.code32
> -ENTRY(get_sev_encryption_bit)
> +do_get_sev_encryption_bit:
>  	xor	%eax, %eax
>  
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  	push	%ebx
>  	push	%ecx
>  	push	%edx
> -	push	%edi
> -
> -	/*
> -	 * RIP-relative addressing is needed to access the encryption bit
> -	 * variable. Since we are running in 32-bit mode we need this call/pop
> -	 * sequence to get the proper relative addressing.
> -	 */
> -	call	1f
> -1:	popl	%edi
> -	subl	$1b, %edi
> -
> -	movl	enc_bit(%edi), %eax
> -	cmpl	$0, %eax
> -	jge	.Lsev_exit
>  
>  	/* Check if running under a hypervisor */
>  	movl	$1, %eax
> @@ -69,25 +55,65 @@ ENTRY(get_sev_encryption_bit)
>  
>  	movl	%ebx, %eax
>  	andl	$0x3f, %eax		/* Return the encryption bit location */
> -	movl	%eax, enc_bit(%edi)
>  	jmp	.Lsev_exit
>  
>  .Lno_sev:
>  	xor	%eax, %eax
> -	movl	%eax, enc_bit(%edi)
>  
>  .Lsev_exit:
> -	pop	%edi
>  	pop	%edx
>  	pop	%ecx
>  	pop	%ebx
>  
> +#endif	/* CONFIG_AMD_MEM_ENCRYPT */
> +
> +	ret
> +
> +ENTRY(get_sev_encryption_bit)
> +	xor	%eax, %eax
> +
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	push	%edi
> +
> +	/*
> +	 * RIP-relative addressing is needed to access the encryption bit
> +	 * variable. Since we are running in 32-bit mode we need this call/pop
> +	 * sequence to get the proper relative addressing.
> +	 */
> +	call	1f
> +1:	popl	%edi
> +	subl	$1b, %edi
> +
> +	movl	enc_bit(%edi), %eax
> +	cmpl	$0, %eax
> +	jge 2f
> +
> +	call    do_get_sev_encryption_bit
> +	movl	%eax, enc_bit(%edi)
> +2:
> +	pop	%edi
>  #endif	/* CONFIG_AMD_MEM_ENCRYPT */
>  
>  	ret
>  ENDPROC(get_sev_encryption_bit)
>  
>  	.code64
> +ENTRY(get_sev_encryption_bit_64)
> +	xor	%rax, %rax
> +
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	movl	enc_bit(%rip), %eax
> +	cmpl	$0, %eax
> +	jge 1f
> +
> +	call    do_get_sev_encryption_bit
> +	movl	%eax, enc_bit(%rip)
> +1:
> +#endif	/* CONFIG_AMD_MEM_ENCRYPT */
> +
> +	ret
> +ENDPROC(get_sev_encryption_bit_64)
> +
>  ENTRY(set_sev_encryption_mask)
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  	push	%rbp
> @@ -95,7 +121,7 @@ ENTRY(set_sev_encryption_mask)
>  
>  	movq	%rsp, %rbp		/* Save current stack pointer */
>  
> -	call	get_sev_encryption_bit	/* Get the encryption bit position */
> +	call	get_sev_encryption_bit_64	/* Get the encryption bit position */
>  	testl	%eax, %eax
>  	jz	.Lno_sev_mask
>  
>
Borislav Petkov Sept. 25, 2018, 5:26 p.m. UTC | #2
On Tue, Sep 25, 2018 at 02:33:48PM +0000, Lendacky, Thomas wrote:
> On 09/25/2018 06:10 AM, Kairui Song wrote:
> > Commit 1958b5fc4010 ("x86/boot: Add early boot support when running
> > with SEV active") sometimes makes kexec unstable: a kexec reboot fails
> > to start the second kernel, which should bypass the BIOS boot process,
> > and the system resets instead.
> > 
> > That's because the get_sev_encryption_bit function uses 32-bit
> > RIP-relative addressing to read the value of enc_bit, but kexec may
> > load the early boot code at a higher location, beyond the 32-bit
> > addressing limit. Garbage is read instead, get_sev_encryption_bit
> > returns the wrong value, and the memory pages end up with the wrong
> > encryption flag.
> > 
> > Add a get_sev_encryption_bit_64 function to avoid this problem. The
> > 64-bit early boot code uses it instead; it reads enc_bit with native
> > RIP-relative addressing, which works regardless of where the code is
> > loaded.
> > 
> > Fixes: 1958b5fc4010 ("x86/boot: Add early boot support when running with SEV active")
> > Signed-off-by: Kairui Song <kasong@redhat.com>
> 
> Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
> 
> > ---
> >  arch/x86/boot/compressed/mem_encrypt.S | 64 ++++++++++++++++++--------
> >  1 file changed, 45 insertions(+), 19 deletions(-)
> > 
> > diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
> > index eaa843a52907..41933550449a 100644
> > --- a/arch/x86/boot/compressed/mem_encrypt.S
> > +++ b/arch/x86/boot/compressed/mem_encrypt.S
> > @@ -18,27 +18,13 @@
> >  
> >  	.text
> >  	.code32
> > -ENTRY(get_sev_encryption_bit)
> > +do_get_sev_encryption_bit:
> >  	xor	%eax, %eax
> >  
> >  #ifdef CONFIG_AMD_MEM_ENCRYPT
> >  	push	%ebx
> >  	push	%ecx
> >  	push	%edx
> > -	push	%edi
> > -
> > -	/*
> > -	 * RIP-relative addressing is needed to access the encryption bit
> > -	 * variable. Since we are running in 32-bit mode we need this call/pop
> > -	 * sequence to get the proper relative addressing.
> > -	 */
> > -	call	1f
> > -1:	popl	%edi
> > -	subl	$1b, %edi
> > -
> > -	movl	enc_bit(%edi), %eax
> > -	cmpl	$0, %eax
> > -	jge	.Lsev_exit
> >  
> >  	/* Check if running under a hypervisor */
> >  	movl	$1, %eax
> > @@ -69,25 +55,65 @@ ENTRY(get_sev_encryption_bit)
> >  
> >  	movl	%ebx, %eax
> >  	andl	$0x3f, %eax		/* Return the encryption bit location */
> > -	movl	%eax, enc_bit(%edi)

IINM, the problem can be addressed in a simpler way by getting rid of
enc_bit and thus getting rid of the need to do relative addressing of
anything and simply doing the whole dance of figuring out the C-bit each
time. It probably wouldn't even be measurable...
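
For reference, the "whole dance" is essentially the CPUID/MSR detection
sequence already in the file; a rough sketch follows, with raw constants
written out in place of the MSR_AMD64_SEV macros (an approximation, not
the exact file contents):

	movl	$1, %eax
	cpuid
	bt	$31, %ecx		/* running under a hypervisor? */
	jnc	.Lno_sev

	movl	$0x80000000, %eax	/* highest extended CPUID leaf */
	cpuid
	cmpl	$0x8000001f, %eax	/* is leaf 0x8000001f available? */
	jb	.Lno_sev

	movl	$0x8000001f, %eax	/* SEV feature leaf */
	cpuid
	bt	$1, %eax		/* Fn8000_001F[EAX] bit 1: SEV supported? */
	jnc	.Lno_sev

	movl	$0xc0010131, %ecx	/* MSR_AMD64_SEV */
	rdmsr
	bt	$0, %eax		/* bit 0: SEV enabled on this guest? */
	jnc	.Lno_sev

	movl	%ebx, %eax
	andl	$0x3f, %eax		/* Fn8000_001F[EBX][5:0]: C-bit position */

Dropping the enc_bit cache simply means running this sequence on every
call.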
Baoquan He Sept. 26, 2018, 7:32 a.m. UTC | #3
On 09/25/18 at 07:26pm, Borislav Petkov wrote:
> IINM, the problem can be addressed in a simpler way by getting rid of
> enc_bit and thus getting rid of the need to do relative addressing of
> anything and simply doing the whole dance of figuring out the C-bit each
> time. It probably wouldn't even be measurable...

Couldn't agree more.

Obviously enc_bit is redundant here. We only check eax each time
anyway; removing it fixes the RIP-relative addressing issue in kexec.

diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index eaa843a52907..0b60eb867d25 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -27,19 +27,6 @@ ENTRY(get_sev_encryption_bit)
 	push	%edx
 	push	%edi
 
-	/*
-	 * RIP-relative addressing is needed to access the encryption bit
-	 * variable. Since we are running in 32-bit mode we need this call/pop
-	 * sequence to get the proper relative addressing.
-	 */
-	call	1f
-1:	popl	%edi
-	subl	$1b, %edi
-
-	movl	enc_bit(%edi), %eax
-	cmpl	$0, %eax
-	jge	.Lsev_exit
-
 	/* Check if running under a hypervisor */
 	movl	$1, %eax
 	cpuid
@@ -69,12 +56,10 @@ ENTRY(get_sev_encryption_bit)
 
 	movl	%ebx, %eax
 	andl	$0x3f, %eax		/* Return the encryption bit location */
-	movl	%eax, enc_bit(%edi)
 	jmp	.Lsev_exit
 
 .Lno_sev:
 	xor	%eax, %eax
-	movl	%eax, enc_bit(%edi)
 
 .Lsev_exit:
 	pop	%edi
@@ -113,9 +98,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)
 
 	.data
-enc_bit:
-	.int	0xffffffff
-
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	.balign	8
 GLOBAL(sme_me_mask)
Kairui Song Sept. 26, 2018, 10:52 a.m. UTC | #4
On Wed, Sep 26, 2018 at 3:33 PM Baoquan He <bhe@redhat.com> wrote:
>
> On 09/25/18 at 07:26pm, Borislav Petkov wrote:
> > IINM, the problem can be addressed in a simpler way by getting rid of
> > enc_bit and thus getting rid of the need to do relative addressing of
> > anything and simply doing the whole dance of figuring out the C-bit each
> > time. It probably wouldn't even be measurable...
>
> Couldn't agree more.
>
> Obviously enc_bit is redundant here. We only check eax each time
> anyway; removing it fixes the RIP-relative addressing issue in kexec.
>
> diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
> index eaa843a52907..0b60eb867d25 100644
> --- a/arch/x86/boot/compressed/mem_encrypt.S
> +++ b/arch/x86/boot/compressed/mem_encrypt.S
> @@ -27,19 +27,6 @@ ENTRY(get_sev_encryption_bit)
>         push    %edx
>         push    %edi
>
> -       /*
> -        * RIP-relative addressing is needed to access the encryption bit
> -        * variable. Since we are running in 32-bit mode we need this call/pop
> -        * sequence to get the proper relative addressing.
> -        */
> -       call    1f
> -1:     popl    %edi
> -       subl    $1b, %edi
> -
> -       movl    enc_bit(%edi), %eax
> -       cmpl    $0, %eax
> -       jge     .Lsev_exit
> -
>         /* Check if running under a hypervisor */
>         movl    $1, %eax
>         cpuid
> @@ -69,12 +56,10 @@ ENTRY(get_sev_encryption_bit)
>
>         movl    %ebx, %eax
>         andl    $0x3f, %eax             /* Return the encryption bit location */
> -       movl    %eax, enc_bit(%edi)
>         jmp     .Lsev_exit
>
>  .Lno_sev:
>         xor     %eax, %eax
> -       movl    %eax, enc_bit(%edi)
>
>  .Lsev_exit:
>         pop     %edi
> @@ -113,9 +98,6 @@ ENTRY(set_sev_encryption_mask)
>  ENDPROC(set_sev_encryption_mask)
>
>         .data
> -enc_bit:
> -       .int    0xffffffff
> -
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>         .balign 8
>  GLOBAL(sme_me_mask)

That is much cleaner indeed. I'm not sure if enc_bit has any other
usage; if not, we can just drop it for sure.

Hi Thomas, can you help confirm whether enc_bit is only a cache to
avoid redoing the CPUID detection?
Baoquan He Sept. 26, 2018, 11:22 a.m. UTC | #5
On 09/26/18 at 03:32pm, Baoquan He wrote:
> On 09/25/18 at 07:26pm, Borislav Petkov wrote:
> > IINM, the problem can be addressed in a simpler way by getting rid of
> > enc_bit and thus getting rid of the need to do relative addressing of
> > anything and simply doing the whole dance of figuring out the C-bit each
> > time. It probably wouldn't even be measurable...
> 
> Couldn't agree more.
> 
> Obviously enc_bit is redundant here. We only check eax each time
> anyway; removing it fixes the RIP-relative addressing issue in kexec.

OK, in distros CONFIG_AMD_MEM_ENCRYPT=y is usually set by default.
With enc_bit, the result can be saved once during normal boot and then
fetched to skip the CPUID detection in initialize_identity_maps().
However, this only speeds things up on AMD systems with SME; on Intel
CPUs and AMD CPUs without SME, the CPUID detection still has to run
twice. Removing it should not be measurable, as Boris said. Not sure
if Tom has other concerns.

Thanks
Baoquan

> 
> diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
> index eaa843a52907..0b60eb867d25 100644
> --- a/arch/x86/boot/compressed/mem_encrypt.S
> +++ b/arch/x86/boot/compressed/mem_encrypt.S
> @@ -27,19 +27,6 @@ ENTRY(get_sev_encryption_bit)
>  	push	%edx
>  	push	%edi
>  
> -	/*
> -	 * RIP-relative addressing is needed to access the encryption bit
> -	 * variable. Since we are running in 32-bit mode we need this call/pop
> -	 * sequence to get the proper relative addressing.
> -	 */
> -	call	1f
> -1:	popl	%edi
> -	subl	$1b, %edi
> -
> -	movl	enc_bit(%edi), %eax
> -	cmpl	$0, %eax
> -	jge	.Lsev_exit
> -
>  	/* Check if running under a hypervisor */
>  	movl	$1, %eax
>  	cpuid
> @@ -69,12 +56,10 @@ ENTRY(get_sev_encryption_bit)
>  
>  	movl	%ebx, %eax
>  	andl	$0x3f, %eax		/* Return the encryption bit location */
> -	movl	%eax, enc_bit(%edi)
>  	jmp	.Lsev_exit
>  
>  .Lno_sev:
>  	xor	%eax, %eax
> -	movl	%eax, enc_bit(%edi)
>  
>  .Lsev_exit:
>  	pop	%edi
> @@ -113,9 +98,6 @@ ENTRY(set_sev_encryption_mask)
>  ENDPROC(set_sev_encryption_mask)
>  
>  	.data
> -enc_bit:
> -	.int	0xffffffff
> -
>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>  	.balign	8
>  GLOBAL(sme_me_mask)
Tom Lendacky Sept. 26, 2018, 1:01 p.m. UTC | #6
On 09/26/2018 06:22 AM, Baoquan He wrote:
> On 09/26/18 at 03:32pm, Baoquan He wrote:
>> On 09/25/18 at 07:26pm, Borislav Petkov wrote:
>>> IINM, the problem can be addressed in a simpler way by getting rid of
>>> enc_bit and thus getting rid of the need to do relative addressing of
>>> anything and simply doing the whole dance of figuring out the C-bit each
>>> time. It probably wouldn't even be measurable...
>>
>> Couldn't agree more.
>>
>> Obviously enc_bit is redundant here. We only check eax each time
>> anyway; removing it fixes the RIP-relative addressing issue in kexec.
> 
> OK, in distros CONFIG_AMD_MEM_ENCRYPT=y is usually set by default.
> With enc_bit, the result can be saved once during normal boot and then
> fetched to skip the CPUID detection in initialize_identity_maps().
> However, this only speeds things up on AMD systems with SME; on Intel
> CPUs and AMD CPUs without SME, the CPUID detection still has to run
> twice. Removing it should not be measurable, as Boris said. Not sure
> if Tom has other concerns.

No concern from me.  The original version of the patch did not cache the
value; that was added based on the patch series feedback.  So, if there
is no concern about executing some extra CPUID/RDMSR instructions, then
it would certainly simplify the code quite a bit.

Thanks,
Tom

> 
> Thanks
> Baoquan
> 
>>
>> diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
>> index eaa843a52907..0b60eb867d25 100644
>> --- a/arch/x86/boot/compressed/mem_encrypt.S
>> +++ b/arch/x86/boot/compressed/mem_encrypt.S
>> @@ -27,19 +27,6 @@ ENTRY(get_sev_encryption_bit)
>>  	push	%edx
>>  	push	%edi
>>  
>> -	/*
>> -	 * RIP-relative addressing is needed to access the encryption bit
>> -	 * variable. Since we are running in 32-bit mode we need this call/pop
>> -	 * sequence to get the proper relative addressing.
>> -	 */
>> -	call	1f
>> -1:	popl	%edi
>> -	subl	$1b, %edi
>> -
>> -	movl	enc_bit(%edi), %eax
>> -	cmpl	$0, %eax
>> -	jge	.Lsev_exit
>> -
>>  	/* Check if running under a hypervisor */
>>  	movl	$1, %eax
>>  	cpuid
>> @@ -69,12 +56,10 @@ ENTRY(get_sev_encryption_bit)
>>  
>>  	movl	%ebx, %eax
>>  	andl	$0x3f, %eax		/* Return the encryption bit location */
>> -	movl	%eax, enc_bit(%edi)
>>  	jmp	.Lsev_exit
>>  
>>  .Lno_sev:
>>  	xor	%eax, %eax
>> -	movl	%eax, enc_bit(%edi)
>>  
>>  .Lsev_exit:
>>  	pop	%edi
>> @@ -113,9 +98,6 @@ ENTRY(set_sev_encryption_mask)
>>  ENDPROC(set_sev_encryption_mask)
>>  
>>  	.data
>> -enc_bit:
>> -	.int	0xffffffff
>> -
>>  #ifdef CONFIG_AMD_MEM_ENCRYPT
>>  	.balign	8
>>  GLOBAL(sme_me_mask)
Borislav Petkov Sept. 26, 2018, 1:18 p.m. UTC | #7
On Wed, Sep 26, 2018 at 01:01:00PM +0000, Lendacky, Thomas wrote:
> No concern from me.  The original version of the patch did not cache the
> value; that was added based on the patch series feedback.  So, if there
> is no concern about executing some extra CPUID/RDMSR instructions, then
> it would certainly simplify the code quite a bit.

Yeah, I think it was me who suggested caching it, but having simpler
code greatly outweighs the minute caching win.
Baoquan He Sept. 26, 2018, 1:21 p.m. UTC | #8
On 09/26/18 at 01:01pm, Lendacky, Thomas wrote:
> On 09/26/2018 06:22 AM, Baoquan He wrote:
> > On 09/26/18 at 03:32pm, Baoquan He wrote:
> >> On 09/25/18 at 07:26pm, Borislav Petkov wrote:
> >>> IINM, the problem can be addressed in a simpler way by getting rid of
> >>> enc_bit and thus getting rid of the need to do relative addressing of
> >>> anything and simply doing the whole dance of figuring out the C-bit each
> >>> time. It probably wouldn't even be measurable...
> >>
> >> Couldn't agree more.
> >>
> >> Obviously enc_bit is redundant here. We only check eax each time
> >> anyway; removing it fixes the RIP-relative addressing issue in kexec.
> > 
> > OK, in distros CONFIG_AMD_MEM_ENCRYPT=y is usually set by default.
> > With enc_bit, the result can be saved once during normal boot and then
> > fetched to skip the CPUID detection in initialize_identity_maps().
> > However, this only speeds things up on AMD systems with SME; on Intel
> > CPUs and AMD CPUs without SME, the CPUID detection still has to run
> > twice. Removing it should not be measurable, as Boris said. Not sure
> > if Tom has other concerns.
> 
> No concern from me.  The original version of the patch did not cache the
> value; that was added based on the patch series feedback.  So, if there
> is no concern about executing some extra CPUID/RDMSR instructions, then
> it would certainly simplify the code quite a bit.

OK, thanks for confirming this, Tom.

Then maybe Kairui can repost the code below with a formal patch log
after testing. I have tested it on an Intel machine with 48G of memory
and CONFIG_AMD_MEM_ENCRYPT=y, and it works well. Maybe add Boris's
Suggested-by, and CC me.

Thanks
Baoquan

> >>
> >> diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
> >> index eaa843a52907..0b60eb867d25 100644
> >> --- a/arch/x86/boot/compressed/mem_encrypt.S
> >> +++ b/arch/x86/boot/compressed/mem_encrypt.S
> >> @@ -27,19 +27,6 @@ ENTRY(get_sev_encryption_bit)
> >>  	push	%edx
> >>  	push	%edi
> >>  
> >> -	/*
> >> -	 * RIP-relative addressing is needed to access the encryption bit
> >> -	 * variable. Since we are running in 32-bit mode we need this call/pop
> >> -	 * sequence to get the proper relative addressing.
> >> -	 */
> >> -	call	1f
> >> -1:	popl	%edi
> >> -	subl	$1b, %edi
> >> -
> >> -	movl	enc_bit(%edi), %eax
> >> -	cmpl	$0, %eax
> >> -	jge	.Lsev_exit
> >> -
> >>  	/* Check if running under a hypervisor */
> >>  	movl	$1, %eax
> >>  	cpuid
> >> @@ -69,12 +56,10 @@ ENTRY(get_sev_encryption_bit)
> >>  
> >>  	movl	%ebx, %eax
> >>  	andl	$0x3f, %eax		/* Return the encryption bit location */
> >> -	movl	%eax, enc_bit(%edi)
> >>  	jmp	.Lsev_exit
> >>  
> >>  .Lno_sev:
> >>  	xor	%eax, %eax
> >> -	movl	%eax, enc_bit(%edi)
> >>  
> >>  .Lsev_exit:
> >>  	pop	%edi
> >> @@ -113,9 +98,6 @@ ENTRY(set_sev_encryption_mask)
> >>  ENDPROC(set_sev_encryption_mask)
> >>  
> >>  	.data
> >> -enc_bit:
> >> -	.int	0xffffffff
> >> -
> >>  #ifdef CONFIG_AMD_MEM_ENCRYPT
> >>  	.balign	8
> >>  GLOBAL(sme_me_mask)

Patch

diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index eaa843a52907..41933550449a 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -18,27 +18,13 @@ 
 
 	.text
 	.code32
-ENTRY(get_sev_encryption_bit)
+do_get_sev_encryption_bit:
 	xor	%eax, %eax
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	push	%ebx
 	push	%ecx
 	push	%edx
-	push	%edi
-
-	/*
-	 * RIP-relative addressing is needed to access the encryption bit
-	 * variable. Since we are running in 32-bit mode we need this call/pop
-	 * sequence to get the proper relative addressing.
-	 */
-	call	1f
-1:	popl	%edi
-	subl	$1b, %edi
-
-	movl	enc_bit(%edi), %eax
-	cmpl	$0, %eax
-	jge	.Lsev_exit
 
 	/* Check if running under a hypervisor */
 	movl	$1, %eax
@@ -69,25 +55,65 @@ ENTRY(get_sev_encryption_bit)
 
 	movl	%ebx, %eax
 	andl	$0x3f, %eax		/* Return the encryption bit location */
-	movl	%eax, enc_bit(%edi)
 	jmp	.Lsev_exit
 
 .Lno_sev:
 	xor	%eax, %eax
-	movl	%eax, enc_bit(%edi)
 
 .Lsev_exit:
-	pop	%edi
 	pop	%edx
 	pop	%ecx
 	pop	%ebx
 
+#endif	/* CONFIG_AMD_MEM_ENCRYPT */
+
+	ret
+
+ENTRY(get_sev_encryption_bit)
+	xor	%eax, %eax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	push	%edi
+
+	/*
+	 * RIP-relative addressing is needed to access the encryption bit
+	 * variable. Since we are running in 32-bit mode we need this call/pop
+	 * sequence to get the proper relative addressing.
+	 */
+	call	1f
+1:	popl	%edi
+	subl	$1b, %edi
+
+	movl	enc_bit(%edi), %eax
+	cmpl	$0, %eax
+	jge 2f
+
+	call    do_get_sev_encryption_bit
+	movl	%eax, enc_bit(%edi)
+2:
+	pop	%edi
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
 
 	ret
 ENDPROC(get_sev_encryption_bit)
 
 	.code64
+ENTRY(get_sev_encryption_bit_64)
+	xor	%rax, %rax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	movl	enc_bit(%rip), %eax
+	cmpl	$0, %eax
+	jge 1f
+
+	call    do_get_sev_encryption_bit
+	movl	%eax, enc_bit(%rip)
+1:
+#endif	/* CONFIG_AMD_MEM_ENCRYPT */
+
+	ret
+ENDPROC(get_sev_encryption_bit_64)
+
 ENTRY(set_sev_encryption_mask)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	push	%rbp
@@ -95,7 +121,7 @@ ENTRY(set_sev_encryption_mask)
 
 	movq	%rsp, %rbp		/* Save current stack pointer */
 
-	call	get_sev_encryption_bit	/* Get the encryption bit position */
+	call	get_sev_encryption_bit_64	/* Get the encryption bit position */
 	testl	%eax, %eax
 	jz	.Lno_sev_mask