* [PATCH] x86, 64-bit: Move K8 B step iret fixup to fault entry asm (v2)
From: Brian Gerst @ 2009-11-03 17:55 UTC
  To: Ingo Molnar; +Cc: x86, linux-kernel

Move the handling of truncated %rip from an iret fault to the fault
entry path.

This allows x86-64 to use the standard search_extable() function.

v2: Fixed jump to error_swapgs to be unconditional.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
---
 arch/x86/include/asm/uaccess.h |    1 -
 arch/x86/kernel/entry_64.S     |   11 ++++++++---
 arch/x86/mm/extable.c          |   31 -------------------------------
 3 files changed, 8 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index d2c6c93..abd3e0e 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -570,7 +570,6 @@ extern struct movsl_mask {
 #ifdef CONFIG_X86_32
 # include "uaccess_32.h"
 #else
-# define ARCH_HAS_SEARCH_EXTABLE
 # include "uaccess_64.h"
 #endif
 
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b5c061f..1579a6c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1491,12 +1491,17 @@ error_kernelspace:
 	leaq irq_return(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
 	je error_swapgs
-	movl %ecx,%ecx	/* zero extend */
-	cmpq %rcx,RIP+8(%rsp)
-	je error_swapgs
+	movl %ecx,%eax	/* zero extend */
+	cmpq %rax,RIP+8(%rsp)
+	je bstep_iret
 	cmpq $gs_change,RIP+8(%rsp)
 	je error_swapgs
 	jmp error_sti
+
+bstep_iret:
+	/* Fix truncated RIP */
+	movq %rcx,RIP+8(%rsp)
+	jmp error_swapgs
 END(error_entry)
 
 
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 61b41ca..d0474ad 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -35,34 +35,3 @@ int fixup_exception(struct pt_regs *regs)
 
 	return 0;
 }
-
-#ifdef CONFIG_X86_64
-/*
- * Need to defined our own search_extable on X86_64 to work around
- * a B stepping K8 bug.
- */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *first,
-	       const struct exception_table_entry *last,
-	       unsigned long value)
-{
-	/* B stepping K8 bug */
-	if ((value >> 32) == 0)
-		value |= 0xffffffffUL << 32;
-
-	while (first <= last) {
-		const struct exception_table_entry *mid;
-		long diff;
-
-		mid = (last - first) / 2 + first;
-		diff = mid->insn - value;
-		if (diff == 0)
-			return mid;
-		else if (diff < 0)
-			first = mid+1;
-		else
-			last = mid-1;
-	}
-	return NULL;
-}
-#endif
-- 
1.6.2.5
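
In rough C form, the decision error_kernelspace makes after this patch
is sketched below.  This is illustrative only: the real code is the
assembly above, and error_swapgs()/error_sti() and the addresses in
main() are hypothetical stand-ins for the asm labels and RIP+8(%rsp).
(Assumes 64-bit long, as on x86-64.)

#include <stdio.h>

static void error_swapgs(void) { puts("error_swapgs: user GS still loaded"); }
static void error_sti(void)    { puts("error_sti: ordinary kernel fault"); }

static void error_kernelspace(unsigned long *fault_rip,
			      unsigned long irq_return,
			      unsigned long gs_change)
{
	if (*fault_rip == irq_return) {
		error_swapgs();		/* fault on the iret itself */
	} else if (*fault_rip == (irq_return & 0xffffffffUL)) {
		/*
		 * bstep_iret: the K8 B-step erratum reported a RIP
		 * truncated to 32 bits; repair it so the standard
		 * exception-table search finds the iret fixup entry.
		 */
		*fault_rip = irq_return;
		error_swapgs();
	} else if (*fault_rip == gs_change) {
		error_swapgs();		/* fault while reloading %gs */
	} else {
		error_sti();		/* any other kernel fault */
	}
}

int main(void)
{
	unsigned long irq_return = 0xffffffff8100abcdUL; /* hypothetical */
	unsigned long rip = irq_return & 0xffffffffUL;	 /* truncated */

	error_kernelspace(&rip, irq_return, 0xffffffff81001234UL);
	printf("repaired RIP: %#lx\n", rip);
	return 0;
}

The key change is the middle branch: instead of letting the truncated
RIP reach the exception-table search, it is repaired on the spot.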



* Re: [PATCH] x86, 64-bit: Move K8 B step iret fixup to fault entry asm (v2)
From: Ingo Molnar @ 2009-11-03 18:10 UTC
  To: Brian Gerst; +Cc: x86, linux-kernel, H. Peter Anvin, Thomas Gleixner


* Brian Gerst <brgerst@gmail.com> wrote:

> Move the handling of truncated %rip from an iret fault to the fault
> entry path.
> 
> This allows x86-64 to use the standard search_extable() function.
> 
> v2: Fixed jump to error_swapgs to be unconditional.

v1 is already in the tip:x86/asm topic tree. Mind sending a delta fix 
against:

  http://people.redhat.com/mingo/tip.git/README

?

Also, i'm having second thoughts about the change:

> Signed-off-by: Brian Gerst <brgerst@gmail.com>
> ---
>  arch/x86/include/asm/uaccess.h |    1 -
>  arch/x86/kernel/entry_64.S     |   11 ++++++++---
>  arch/x86/mm/extable.c          |   31 -------------------------------
>  3 files changed, 8 insertions(+), 35 deletions(-)
> 
> diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
> index d2c6c93..abd3e0e 100644
> --- a/arch/x86/include/asm/uaccess.h
> +++ b/arch/x86/include/asm/uaccess.h
> @@ -570,7 +570,6 @@ extern struct movsl_mask {
>  #ifdef CONFIG_X86_32
>  # include "uaccess_32.h"
>  #else
> -# define ARCH_HAS_SEARCH_EXTABLE
>  # include "uaccess_64.h"
>  #endif
>  
> diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
> index b5c061f..1579a6c 100644
> --- a/arch/x86/kernel/entry_64.S
> +++ b/arch/x86/kernel/entry_64.S
> @@ -1491,12 +1491,17 @@ error_kernelspace:
>  	leaq irq_return(%rip),%rcx
>  	cmpq %rcx,RIP+8(%rsp)
>  	je error_swapgs
> -	movl %ecx,%ecx	/* zero extend */
> -	cmpq %rcx,RIP+8(%rsp)
> -	je error_swapgs
> +	movl %ecx,%eax	/* zero extend */
> +	cmpq %rax,RIP+8(%rsp)
> +	je bstep_iret
>  	cmpq $gs_change,RIP+8(%rsp)
>  	je error_swapgs
>  	jmp error_sti
> +
> +bstep_iret:
> +	/* Fix truncated RIP */
> +	movq %rcx,RIP+8(%rsp)
> +	jmp error_swapgs
>  END(error_entry)
>  
>  
> diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
> index 61b41ca..d0474ad 100644
> --- a/arch/x86/mm/extable.c
> +++ b/arch/x86/mm/extable.c
> @@ -35,34 +35,3 @@ int fixup_exception(struct pt_regs *regs)
>  
>  	return 0;
>  }
> -
> -#ifdef CONFIG_X86_64
> -/*
> - * Need to defined our own search_extable on X86_64 to work around
> - * a B stepping K8 bug.
> - */
> -const struct exception_table_entry *
> -search_extable(const struct exception_table_entry *first,
> -	       const struct exception_table_entry *last,
> -	       unsigned long value)
> -{
> -	/* B stepping K8 bug */
> -	if ((value >> 32) == 0)
> -		value |= 0xffffffffUL << 32;
> -
> -	while (first <= last) {
> -		const struct exception_table_entry *mid;
> -		long diff;
> -
> -		mid = (last - first) / 2 + first;
> -		diff = mid->insn - value;
> -		if (diff == 0)
> -			return mid;
> -		else if (diff < 0)
> -			first = mid+1;
> -		else
> -			last = mid-1;
> -	}
> -	return NULL;
> -}
> -#endif

Is this the only way we can end up with a truncated 64-bit RIP passed 
in to search_exception_tables()/search_extable()? Before your commit 
we basically had a last-ditch safety net in 64-bit kernels that 
repaired truncated RIPs - no matter how they got there (via known or 
unknown errata).

Thanks,

	Ingo
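
For reference, the standard search_extable() that x86-64 now falls
back to is a plain binary search with no such net - roughly the
following, modeled on the generic lib/extable.c of this era (a sketch,
not the verbatim source):

#include <stddef.h>

struct exception_table_entry {
	unsigned long insn, fixup;
};

const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	while (first <= last) {
		const struct exception_table_entry *mid;

		mid = ((last - first) >> 1) + first;
		/*
		 * Unsigned comparisons: the distance between value
		 * and insn can exceed LONG_MAX, which is why the
		 * signed subtraction in the removed x86 copy is
		 * avoided here.
		 */
		if (mid->insn < value)
			first = mid + 1;
		else if (mid->insn > value)
			last = mid - 1;
		else
			return mid;
	}
	return NULL;
}

Note there is nothing here that repairs a truncated value - which is
why, after this patch, the repair has to happen earlier, in the entry
asm.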


* Re: [PATCH] x86, 64-bit: Move K8 B step iret fixup to fault entry asm (v2)
From: Brian Gerst @ 2009-11-03 18:39 UTC
  To: Ingo Molnar; +Cc: x86, linux-kernel, H. Peter Anvin, Thomas Gleixner

On Tue, Nov 3, 2009 at 1:10 PM, Ingo Molnar <mingo@elte.hu> wrote:
>
> * Brian Gerst <brgerst@gmail.com> wrote:
>
>> Move the handling of truncated %rip from an iret fault to the fault
>> entry path.
>>
>> This allows x86-64 to use the standard search_extable() function.
>>
>> v2: Fixed jump to error_swapgs to be unconditional.
>
> v1 is already in the tip:x86/asm topic tree. Mind sending a delta fix
> against:

Will do.

>  http://people.redhat.com/mingo/tip.git/README
>
> ?
>
> Also, i'm having second thoughts about the change:
>
>> Signed-off-by: Brian Gerst <brgerst@gmail.com>
>> ---
>>  arch/x86/include/asm/uaccess.h |    1 -
>>  arch/x86/kernel/entry_64.S     |   11 ++++++++---
>>  arch/x86/mm/extable.c          |   31 -------------------------------
>>  3 files changed, 8 insertions(+), 35 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
>> index d2c6c93..abd3e0e 100644
>> --- a/arch/x86/include/asm/uaccess.h
>> +++ b/arch/x86/include/asm/uaccess.h
>> @@ -570,7 +570,6 @@ extern struct movsl_mask {
>>  #ifdef CONFIG_X86_32
>>  # include "uaccess_32.h"
>>  #else
>> -# define ARCH_HAS_SEARCH_EXTABLE
>>  # include "uaccess_64.h"
>>  #endif
>>
>> diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
>> index b5c061f..1579a6c 100644
>> --- a/arch/x86/kernel/entry_64.S
>> +++ b/arch/x86/kernel/entry_64.S
>> @@ -1491,12 +1491,17 @@ error_kernelspace:
>>       leaq irq_return(%rip),%rcx
>>       cmpq %rcx,RIP+8(%rsp)
>>       je error_swapgs
>> -     movl %ecx,%ecx  /* zero extend */
>> -     cmpq %rcx,RIP+8(%rsp)
>> -     je error_swapgs
>> +     movl %ecx,%eax  /* zero extend */
>> +     cmpq %rax,RIP+8(%rsp)
>> +     je bstep_iret
>>       cmpq $gs_change,RIP+8(%rsp)
>>       je error_swapgs
>>       jmp error_sti
>> +
>> +bstep_iret:
>> +     /* Fix truncated RIP */
>> +     movq %rcx,RIP+8(%rsp)
>> +     jmp error_swapgs
>>  END(error_entry)
>>
>>
>> diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
>> index 61b41ca..d0474ad 100644
>> --- a/arch/x86/mm/extable.c
>> +++ b/arch/x86/mm/extable.c
>> @@ -35,34 +35,3 @@ int fixup_exception(struct pt_regs *regs)
>>
>>       return 0;
>>  }
>> -
>> -#ifdef CONFIG_X86_64
>> -/*
>> - * Need to defined our own search_extable on X86_64 to work around
>> - * a B stepping K8 bug.
>> - */
>> -const struct exception_table_entry *
>> -search_extable(const struct exception_table_entry *first,
>> -            const struct exception_table_entry *last,
>> -            unsigned long value)
>> -{
>> -     /* B stepping K8 bug */
>> -     if ((value >> 32) == 0)
>> -             value |= 0xffffffffUL << 32;
>> -
>> -     while (first <= last) {
>> -             const struct exception_table_entry *mid;
>> -             long diff;
>> -
>> -             mid = (last - first) / 2 + first;
>> -             diff = mid->insn - value;
>> -             if (diff == 0)
>> -                     return mid;
>> -             else if (diff < 0)
>> -                     first = mid+1;
>> -             else
>> -                     last = mid-1;
>> -     }
>> -     return NULL;
>> -}
>> -#endif
>
> Is this the only way we can end up with a truncated 64-bit RIP passed
> in to search_exception_tables()/search_extable()? Before your commit
> we basically had a last-ditch safety net in 64-bit kernels that
> repaired truncated RIPs - no matter how they got there (via known or
> unknown errata).

That kind of erratum would logically only happen on a transition to
32-bit mode (or even 16-bit mode?), and the only other place this
could happen is the sysret or sysexit paths, neither of which has
exception handlers.  If it were happening in those places, you would
see the truncated RIP in oops reports, since the original (truncated)
RIP in pt_regs is not changed by the current code.

--
Brian Gerst
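
For concreteness, the safety net under discussion was a one-line
transformation; isolated, with hypothetical example values, it amounts
to:

#include <assert.h>

/*
 * The removed safety net, isolated (assumes 64-bit long, as on
 * x86-64): fill the high bits back in before the lookup.  Kernel text
 * lives in the top 2GB of the address space, so a truncated kernel
 * RIP always has all ones there.
 */
static unsigned long reextend_rip(unsigned long value)
{
	if ((value >> 32) == 0)		/* high half lost? */
		value |= 0xffffffffUL << 32;
	return value;
}

int main(void)
{
	assert(reextend_rip(0x8100abcdUL) == 0xffffffff8100abcdUL);
	assert(reextend_rip(0xffffffff8100abcdUL) == 0xffffffff8100abcdUL);
	return 0;
}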


* [PATCH] x86, 64-bit: Fix bstep_iret jump
From: Brian Gerst @ 2009-11-03 19:02 UTC
  To: Ingo Molnar; +Cc: x86, linux-kernel

This jump should be unconditional: bstep_iret is only reached via je,
and movq does not modify the flags, so the conditional branch was
taken only by accident.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
---
 arch/x86/kernel/entry_64.S |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index af0f4b2..1579a6c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1501,7 +1501,7 @@ error_kernelspace:
 bstep_iret:
 	/* Fix truncated RIP */
 	movq %rcx,RIP+8(%rsp)
-	je error_swapgs
+	jmp error_swapgs
 END(error_entry)
 
 
-- 
1.6.2.5



* [tip:x86/asm] x86, 64-bit: Fix bstep_iret jump
From: tip-bot for Brian Gerst @ 2009-11-04  6:33 UTC
  To: linux-tip-commits; +Cc: linux-kernel, hpa, mingo, brgerst, tglx, mingo

Commit-ID:  97829de5a3b88899c5f3ac8802d11868bf4180ba
Gitweb:     http://git.kernel.org/tip/97829de5a3b88899c5f3ac8802d11868bf4180ba
Author:     Brian Gerst <brgerst@gmail.com>
AuthorDate: Tue, 3 Nov 2009 14:02:05 -0500
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 3 Nov 2009 20:50:02 +0100

x86, 64-bit: Fix bstep_iret jump

This jump should be unconditional: bstep_iret is only reached via je,
and movq does not modify the flags, so the conditional branch was
taken only by accident.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
LKML-Reference: <1257274925-15713-1-git-send-email-brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/kernel/entry_64.S |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index af0f4b2..1579a6c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1501,7 +1501,7 @@ error_kernelspace:
 bstep_iret:
 	/* Fix truncated RIP */
 	movq %rcx,RIP+8(%rsp)
-	je error_swapgs
+	jmp error_swapgs
 END(error_entry)
 
 


