* [PATCH] arm64: Explicitly set pstate.ssbs for el0 on kernel entry
From: Neeraj Upadhyay @ 2019-07-09 11:22 UTC
  To: will, mark.rutland, marc.zyngier, julien.thierry, tglx
  Cc: parthd, gkohli, linux-arm-msm, linux-kernel, Neeraj Upadhyay,
	linux-arm-kernel

For CPUs which do not support the pstate.ssbs feature,
spsr.ssbs might not be retained for el0 (PSTATE[12] is RES0
on such CPUs). This is problematic if the task later
migrates to a CPU which does support the feature and relies
on the saved state being correct. On kernel entry,
explicitly set spsr.ssbs, so that speculation is enabled
for el0 when the task migrates to a CPU supporting the ssbs
feature. Restoring the state at kernel entry ensures that
the el0 ssbs state is always consistent while we are in el1.

As alternatives are applied by the boot CPU at the end of
smp init, the presence or absence of the ssbs feature on
the boot CPU is used to decide whether the capability is
provided uniformly.

Signed-off-by: Neeraj Upadhyay <neeraju@codeaurora.org>
---
 arch/arm64/kernel/cpu_errata.c | 16 ++++++++++++++++
 arch/arm64/kernel/entry.S      | 26 +++++++++++++++++++++++++-
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ca11ff7..c84a56d 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -336,6 +336,22 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
 }
 
+void __init arm64_restore_ssbs_state(struct alt_instr *alt,
+				     __le32 *origptr, __le32 *updptr,
+				     int nr_inst)
+{
+	BUG_ON(nr_inst != 1);
+	/*
+	 * Only restore EL0 SSBS state on EL1 entry if cpu does not
+	 * support the capability and capability is present for at
+	 * least one cpu and if the SSBD state allows it to
+	 * be changed.
+	 */
+	if (!this_cpu_has_cap(ARM64_SSBS) && cpus_have_cap(ARM64_SSBS) &&
+	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+
 void arm64_set_ssbd_mitigation(bool state)
 {
 	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9cdc459..7e79305 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -143,6 +143,25 @@ alternative_cb_end
 #endif
 	.endm
 
+	// This macro updates spsr. It also corrupts the condition
+	// codes state.
+	.macro	restore_ssbs_state, saved_spsr, tmp
+#ifdef CONFIG_ARM64_SSBD
+alternative_cb	arm64_restore_ssbs_state
+	b	.L__asm_ssbs_skip\@
+alternative_cb_end
+	ldr	\tmp, [tsk, #TSK_TI_FLAGS]
+	tbnz	\tmp, #TIF_SSBD, .L__asm_ssbs_skip\@
+	tst	\saved_spsr, #PSR_MODE32_BIT	// native task?
+	b.ne	.L__asm_ssbs_compat\@
+	orr	\saved_spsr, \saved_spsr, #PSR_SSBS_BIT
+	b	.L__asm_ssbs_skip\@
+.L__asm_ssbs_compat\@:
+	orr	\saved_spsr, \saved_spsr, #PSR_AA32_SSBS_BIT
+.L__asm_ssbs_skip\@:
+#endif
+	.endm
+
 	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
@@ -182,8 +201,13 @@ alternative_cb_end
 	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 	.endif /* \el == 0 */
-	mrs	x22, elr_el1
 	mrs	x23, spsr_el1
+
+	.if	\el == 0
+	restore_ssbs_state x23, x22
+	.endif
+
+	mrs	x22, elr_el1
 	stp	lr, x21, [sp, #S_LR]
 
 	/*
-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of the Code Aurora Forum, hosted by The Linux Foundation



* Re: [PATCH] arm64: Explicitly set pstate.ssbs for el0 on kernel entry
From: Marc Zyngier @ 2019-07-09 13:08 UTC
  To: Neeraj Upadhyay, will, mark.rutland, julien.thierry, tglx
  Cc: gkohli, linux-arm-msm, parthd, linux-kernel, linux-arm-kernel

Hi Neeraj,

On 09/07/2019 12:22, Neeraj Upadhyay wrote:
> For CPUs which do not support the pstate.ssbs feature,
> spsr.ssbs might not be retained for el0 (PSTATE[12] is RES0
> on such CPUs). This is problematic if the task later
> migrates to a CPU which does support the feature and relies
> on the saved state being correct. On kernel entry,
> explicitly set spsr.ssbs, so that speculation is enabled
> for el0 when the task migrates to a CPU supporting the ssbs
> feature. Restoring the state at kernel entry ensures that
> the el0 ssbs state is always consistent while we are in el1.
> 
> As alternatives are applied by the boot CPU at the end of
> smp init, the presence or absence of the ssbs feature on
> the boot CPU is used to decide whether the capability is
> provided uniformly.

I've seen the same issue, but went for a slightly different
approach, see below.

> 
> Signed-off-by: Neeraj Upadhyay <neeraju@codeaurora.org>
> ---
>  arch/arm64/kernel/cpu_errata.c | 16 ++++++++++++++++
>  arch/arm64/kernel/entry.S      | 26 +++++++++++++++++++++++++-
>  2 files changed, 41 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index ca11ff7..c84a56d 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -336,6 +336,22 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
>  		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
>  }
>  
> +void __init arm64_restore_ssbs_state(struct alt_instr *alt,
> +				     __le32 *origptr, __le32 *updptr,
> +				     int nr_inst)
> +{
> +	BUG_ON(nr_inst != 1);
> +	/*
> +	 * Only restore EL0 SSBS state on EL1 entry if cpu does not
> +	 * support the capability and capability is present for at
> +	 * least one cpu and if the SSBD state allows it to
> +	 * be changed.
> +	 */
> +	if (!this_cpu_has_cap(ARM64_SSBS) && cpus_have_cap(ARM64_SSBS) &&
> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> +		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
> +}
> +
>  void arm64_set_ssbd_mitigation(bool state)
>  {
>  	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 9cdc459..7e79305 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -143,6 +143,25 @@ alternative_cb_end
>  #endif
>  	.endm
>  
> +	// This macro updates spsr. It also corrupts the condition
> +	// codes state.
> +	.macro	restore_ssbs_state, saved_spsr, tmp
> +#ifdef CONFIG_ARM64_SSBD
> +alternative_cb	arm64_restore_ssbs_state
> +	b	.L__asm_ssbs_skip\@
> +alternative_cb_end
> +	ldr	\tmp, [tsk, #TSK_TI_FLAGS]
> +	tbnz	\tmp, #TIF_SSBD, .L__asm_ssbs_skip\@
> +	tst	\saved_spsr, #PSR_MODE32_BIT	// native task?
> +	b.ne	.L__asm_ssbs_compat\@
> +	orr	\saved_spsr, \saved_spsr, #PSR_SSBS_BIT
> +	b	.L__asm_ssbs_skip\@
> +.L__asm_ssbs_compat\@:
> +	orr	\saved_spsr, \saved_spsr, #PSR_AA32_SSBS_BIT
> +.L__asm_ssbs_skip\@:
> +#endif
> +	.endm

Although this is in keeping with the rest of entry.S (perfectly
unreadable ;-), I think we can do something a bit simpler, that
doesn't rely on patching. Also, this doesn't seem to take the
SSBD options such as ARM64_SSBD_FORCE_ENABLE into account.

> +
>  	.macro	kernel_entry, el, regsize = 64
>  	.if	\regsize == 32
>  	mov	w0, w0				// zero upper 32 bits of x0
> @@ -182,8 +201,13 @@ alternative_cb_end
>  	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
>  	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
>  	.endif /* \el == 0 */
> -	mrs	x22, elr_el1
>  	mrs	x23, spsr_el1
> +
> +	.if	\el == 0
> +	restore_ssbs_state x23, x22
> +	.endif
> +
> +	mrs	x22, elr_el1
>  	stp	lr, x21, [sp, #S_LR]
>  
>  	/*
> 

How about the patch below?

Thanks,

	M.

From 7d4314d1ef3122d8bf56a7ef239c8c68e0c81277 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 4 Jun 2019 17:35:18 +0100
Subject: [PATCH] arm64: Force SSBS on context switch

On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
where only some of the CPUs implement SSBS, we end-up losing track of
the SSBS bit across task migration.

To address this issue, let's force the SSBS bit on context switch.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/include/asm/processor.h | 14 ++++++++++++--
 arch/arm64/kernel/process.c        | 14 ++++++++++++++
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);
 
 	regs->sp = sp;
 }
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_AA32_SSBS_BIT;
+		set_compat_ssbs_bit(regs);
 
 	regs->compat_sp = sp;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9856395ccdb7..d451b3b248cf 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -442,6 +442,19 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
+	    !test_tsk_thread_flag(next, TIF_SSBD)) {
+		struct pt_regs *regs = task_pt_regs(next);
+
+		if (compat_user_mode(regs))
+			set_compat_ssbs_bit(regs);
+		else if (user_mode(regs))
+			set_ssbs_bit(regs);
+	}
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -471,6 +484,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ptrauth_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
-- 
2.20.1
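
For reference, the two SSBS bits being set here are defined in the
arch/arm64 ptrace.h headers; quoted below for convenience (values as in
mainline at the time, not part of the patch):

#define PSR_SSBS_BIT		0x00001000	/* AArch64 PSTATE.SSBS, bit 12 */
#define PSR_AA32_SSBS_BIT	0x00800000	/* AArch32 CPSR.SSBS, bit 23 */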


-- 
Jazz is not dead. It just smells funny...


* Re: [PATCH] arm64: Explicitly set pstate.ssbs for el0 on kernel entry
From: Mark Rutland @ 2019-07-09 13:55 UTC
  To: Marc Zyngier
  Cc: parthd, julien.thierry, gkohli, linux-arm-msm, linux-kernel,
	tglx, will, Neeraj Upadhyay, linux-arm-kernel

Hi Marc,

On Tue, Jul 09, 2019 at 02:08:28PM +0100, Marc Zyngier wrote:
> From 7d4314d1ef3122d8bf56a7ef239c8c68e0c81277 Mon Sep 17 00:00:00 2001
> From: Marc Zyngier <marc.zyngier@arm.com>
> Date: Tue, 4 Jun 2019 17:35:18 +0100
> Subject: [PATCH] arm64: Force SSBS on context switch
> 
> On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
> where only some of the CPUs implement SSBS, we end up losing track of
> the SSBS bit across task migration.
> 
> To address this issue, let's force the SSBS bit on context switch.
> 
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  arch/arm64/include/asm/processor.h | 14 ++++++++++++--
>  arch/arm64/kernel/process.c        | 14 ++++++++++++++
>  2 files changed, 26 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
> index fd5b1a4efc70..844e2964b0f5 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
>  		regs->pmr_save = GIC_PRIO_IRQON;
>  }
>  
> +static inline void set_ssbs_bit(struct pt_regs *regs)
> +{
> +	regs->pstate |= PSR_SSBS_BIT;
> +}
> +
> +static inline void set_compat_ssbs_bit(struct pt_regs *regs)
> +{
> +	regs->pstate |= PSR_AA32_SSBS_BIT;
> +}
> +
>  static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>  				unsigned long sp)
>  {
> @@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>  	regs->pstate = PSR_MODE_EL0t;
>  
>  	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> -		regs->pstate |= PSR_SSBS_BIT;
> +		set_ssbs_bit(regs);
>  
>  	regs->sp = sp;
>  }
> @@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
>  #endif
>  
>  	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> -		regs->pstate |= PSR_AA32_SSBS_BIT;
> +		set_compat_ssbs_bit(regs);
>  
>  	regs->compat_sp = sp;
>  }
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 9856395ccdb7..d451b3b248cf 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -442,6 +442,19 @@ void uao_thread_switch(struct task_struct *next)
>  	}
>  }
>  
> +static void ssbs_thread_switch(struct task_struct *next)
> +{
> +	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
> +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
> +		struct pt_regs *regs = task_pt_regs(next);
> +
> +		if (compat_user_mode(regs))
> +			set_compat_ssbs_bit(regs);
> +		else if (user_mode(regs))
> +			set_ssbs_bit(regs);
> +	}
> +}

I think this isn't quite right, and it's not always safe to call
task_pt_regs() on a task.

For user tasks, the kernel stack looks like:

  +---------+ <=== task_stack_page(tsk) + THREAD_SIZE;
  |         |
  | pt_regs |
  |         |
  +---------+ <=== task_pt_regs(tsk)
  |         |
  |         |
  |         |
  |  stack  |
  |         |
  |         |
  |         |
  +---------+ <=== task_stack_page(tsk)

... where:

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)

... and in copy_thread() we initialize a new tsk's SP to start at
task_pt_regs(tsk).

However, in __cpu_up() we start the idle threads' stacks without the
pt_regs bias, at task_stack_page(tsk) + THREAD_SIZE. Likewise for the
initial thread in __primary_switched(). So task_pt_regs(idle) will
return an aliasing portion of the stack, rather than a pt_regs.

So when switching to those, we'll look at an unrelated part of the
stack and corrupt it.

We could add a pt_regs bias to those to prevent stack corruption, though
assuming stacks are zero-initialized, user_mode(task_pt_regs(tsk))
should always be true, since:

#define PSR_MODE_EL0t      0x00000000

#define user_mode(regs) \
	(((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t)

We could:

(a) Check for PF_KTHREAD in ssbs_thread_switch(), and skip when this is
    set (a minimal sketch follows below).

(b) Add the pt_regs bias to all stacks, and not care about the pointless
    manipulation of the junk regs.

(c) Make task_pt_regs() return NULL for kthreads, and fix up the fallout. I'm
    very tempted to do this longer term even if we do (a) or (b) for now.
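
A minimal sketch of option (a), for illustration only -- untested, just
a lightly restructured version of the ssbs_thread_switch() proposed
above, with the kthread check added:

static void ssbs_thread_switch(struct task_struct *next)
{
	struct pt_regs *regs = task_pt_regs(next);

	/*
	 * Kernel threads (including the idle tasks) never return to
	 * EL0, and their stacks may lack the pt_regs bias, in which
	 * case task_pt_regs() aliases ordinary stack; don't touch it.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/* If the mitigation is forced on, leave SSBS clear. */
	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE ||
	    test_tsk_thread_flag(next, TIF_SSBD))
		return;

	if (compat_user_mode(regs))
		set_compat_ssbs_bit(regs);
	else if (user_mode(regs))
		set_ssbs_bit(regs);
}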

Thanks,
Mark.


* Re: [PATCH] arm64: Explicitly set pstate.ssbs for el0 on kernel entry
From: Neeraj Upadhyay @ 2019-07-09 14:18 UTC
  To: Marc Zyngier, will, mark.rutland, julien.thierry, tglx
  Cc: gkohli, linux-arm-msm, parthd, linux-kernel, linux-arm-kernel

Hi Marc,

On 7/9/19 6:38 PM, Marc Zyngier wrote:
> Hi Neeraj,
>
> On 09/07/2019 12:22, Neeraj Upadhyay wrote:
>> For CPUs which do not support the pstate.ssbs feature,
>> spsr.ssbs might not be retained for el0 (PSTATE[12] is RES0
>> on such CPUs). This is problematic if the task later
>> migrates to a CPU which does support the feature and relies
>> on the saved state being correct. On kernel entry,
>> explicitly set spsr.ssbs, so that speculation is enabled
>> for el0 when the task migrates to a CPU supporting the ssbs
>> feature. Restoring the state at kernel entry ensures that
>> the el0 ssbs state is always consistent while we are in el1.
>>
>> As alternatives are applied by the boot CPU at the end of
>> smp init, the presence or absence of the ssbs feature on
>> the boot CPU is used to decide whether the capability is
>> provided uniformly.
> I've seen the same issue, but went for a slightly different
> approach, see below.
>
>> Signed-off-by: Neeraj Upadhyay <neeraju@codeaurora.org>
>> ---
>>   arch/arm64/kernel/cpu_errata.c | 16 ++++++++++++++++
>>   arch/arm64/kernel/entry.S      | 26 +++++++++++++++++++++++++-
>>   2 files changed, 41 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
>> index ca11ff7..c84a56d 100644
>> --- a/arch/arm64/kernel/cpu_errata.c
>> +++ b/arch/arm64/kernel/cpu_errata.c
>> @@ -336,6 +336,22 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
>>   		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
>>   }
>>   
>> +void __init arm64_restore_ssbs_state(struct alt_instr *alt,
>> +				     __le32 *origptr, __le32 *updptr,
>> +				     int nr_inst)
>> +{
>> +	BUG_ON(nr_inst != 1);
>> +	/*
>> +	 * Only restore EL0 SSBS state on EL1 entry if cpu does not
>> +	 * support the capability and capability is present for at
>> +	 * least one cpu and if the SSBD state allows it to
>> +	 * be changed.
>> +	 */
>> +	if (!this_cpu_has_cap(ARM64_SSBS) && cpus_have_cap(ARM64_SSBS) &&
>> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
>> +		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
>> +}
>> +
>>   void arm64_set_ssbd_mitigation(bool state)
>>   {
>>   	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
>> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
>> index 9cdc459..7e79305 100644
>> --- a/arch/arm64/kernel/entry.S
>> +++ b/arch/arm64/kernel/entry.S
>> @@ -143,6 +143,25 @@ alternative_cb_end
>>   #endif
>>   	.endm
>>   
>> +	// This macro updates spsr. It also corrupts the condition
>> +	// codes state.
>> +	.macro	restore_ssbs_state, saved_spsr, tmp
>> +#ifdef CONFIG_ARM64_SSBD
>> +alternative_cb	arm64_restore_ssbs_state
>> +	b	.L__asm_ssbs_skip\@
>> +alternative_cb_end
>> +	ldr	\tmp, [tsk, #TSK_TI_FLAGS]
>> +	tbnz	\tmp, #TIF_SSBD, .L__asm_ssbs_skip\@
>> +	tst	\saved_spsr, #PSR_MODE32_BIT	// native task?
>> +	b.ne	.L__asm_ssbs_compat\@
>> +	orr	\saved_spsr, \saved_spsr, #PSR_SSBS_BIT
>> +	b	.L__asm_ssbs_skip\@
>> +.L__asm_ssbs_compat\@:
>> +	orr	\saved_spsr, \saved_spsr, #PSR_AA32_SSBS_BIT
>> +.L__asm_ssbs_skip\@:
>> +#endif
>> +	.endm
> Although this is in keeping with the rest of entry.S (perfectly
> unreadable ;-), I think we can do something a bit simpler, that
> doesn't rely on patching. Also, this doesn't seem to take the
> SSBD options such as ARM64_SSBD_FORCE_ENABLE into account.

arm64_restore_ssbs_state has a check for ARM64_SSBD_FORCE_ENABLE;
does that look wrong?

>
>> +
>>   	.macro	kernel_entry, el, regsize = 64
>>   	.if	\regsize == 32
>>   	mov	w0, w0				// zero upper 32 bits of x0
>> @@ -182,8 +201,13 @@ alternative_cb_end
>>   	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
>>   	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
>>   	.endif /* \el == 0 */
>> -	mrs	x22, elr_el1
>>   	mrs	x23, spsr_el1
>> +
>> +	.if	\el == 0
>> +	restore_ssbs_state x23, x22
>> +	.endif
>> +
>> +	mrs	x22, elr_el1
>>   	stp	lr, x21, [sp, #S_LR]
>>   
>>   	/*
>>
> How about the patch below?

Looks good; I was just going to mention the PF_KTHREAD check, but
Mark R. has already given detailed information about it.


Thanks

Neeraj

>
> Thanks,
>
> 	M.
>
>  From 7d4314d1ef3122d8bf56a7ef239c8c68e0c81277 Mon Sep 17 00:00:00 2001
> From: Marc Zyngier <marc.zyngier@arm.com>
> Date: Tue, 4 Jun 2019 17:35:18 +0100
> Subject: [PATCH] arm64: Force SSBS on context switch
>
> On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
> where only some of the CPUs implement SSBS, we end up losing track of
> the SSBS bit across task migration.
>
> To address this issue, let's force the SSBS bit on context switch.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>   arch/arm64/include/asm/processor.h | 14 ++++++++++++--
>   arch/arm64/kernel/process.c        | 14 ++++++++++++++
>   2 files changed, 26 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
> index fd5b1a4efc70..844e2964b0f5 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
>   		regs->pmr_save = GIC_PRIO_IRQON;
>   }
>   
> +static inline void set_ssbs_bit(struct pt_regs *regs)
> +{
> +	regs->pstate |= PSR_SSBS_BIT;
> +}
> +
> +static inline void set_compat_ssbs_bit(struct pt_regs *regs)
> +{
> +	regs->pstate |= PSR_AA32_SSBS_BIT;
> +}
> +
>   static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>   				unsigned long sp)
>   {
> @@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>   	regs->pstate = PSR_MODE_EL0t;
>   
>   	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> -		regs->pstate |= PSR_SSBS_BIT;
> +		set_ssbs_bit(regs);
>   
>   	regs->sp = sp;
>   }
> @@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
>   #endif
>   
>   	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> -		regs->pstate |= PSR_AA32_SSBS_BIT;
> +		set_compat_ssbs_bit(regs);
>   
>   	regs->compat_sp = sp;
>   }
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 9856395ccdb7..d451b3b248cf 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -442,6 +442,19 @@ void uao_thread_switch(struct task_struct *next)
>   	}
>   }
>   
> +static void ssbs_thread_switch(struct task_struct *next)
> +{
> +	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
> +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
> +		struct pt_regs *regs = task_pt_regs(next);
> +
> +		if (compat_user_mode(regs))
> +			set_compat_ssbs_bit(regs);
> +		else if (user_mode(regs))
> +			set_ssbs_bit(regs);
> +	}
> +}
> +
>   /*
>    * We store our current task in sp_el0, which is clobbered by userspace. Keep a
>    * shadow copy so that we can restore this upon entry from userspace.
> @@ -471,6 +484,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
>   	entry_task_switch(next);
>   	uao_thread_switch(next);
>   	ptrauth_thread_switch(next);
> +	ssbs_thread_switch(next);
>   
>   	/*
>   	 * Complete any pending TLB or cache maintenance on this CPU in case

-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of the Code Aurora Forum, hosted by The Linux Foundation



* Re: [PATCH] arm64: Explicitly set pstate.ssbs for el0 on kernel entry
From: Marc Zyngier @ 2019-07-09 14:22 UTC
  To: Neeraj Upadhyay, will, mark.rutland, julien.thierry, tglx
  Cc: gkohli, linux-arm-msm, parthd, linux-kernel, linux-arm-kernel

On 09/07/2019 15:18, Neeraj Upadhyay wrote:
> Hi Marc,
> 
> On 7/9/19 6:38 PM, Marc Zyngier wrote:
>> Hi Neeraj,
>>
>> On 09/07/2019 12:22, Neeraj Upadhyay wrote:
>>> For CPUs which do not support the pstate.ssbs feature,
>>> spsr.ssbs might not be retained for el0 (PSTATE[12] is RES0
>>> on such CPUs). This is problematic if the task later
>>> migrates to a CPU which does support the feature and relies
>>> on the saved state being correct. On kernel entry,
>>> explicitly set spsr.ssbs, so that speculation is enabled
>>> for el0 when the task migrates to a CPU supporting the ssbs
>>> feature. Restoring the state at kernel entry ensures that
>>> the el0 ssbs state is always consistent while we are in el1.
>>>
>>> As alternatives are applied by the boot CPU at the end of
>>> smp init, the presence or absence of the ssbs feature on
>>> the boot CPU is used to decide whether the capability is
>>> provided uniformly.
>> I've seen the same issue, but went for a slightly different
>> approach, see below.
>>
>>> Signed-off-by: Neeraj Upadhyay <neeraju@codeaurora.org>
>>> ---
>>>   arch/arm64/kernel/cpu_errata.c | 16 ++++++++++++++++
>>>   arch/arm64/kernel/entry.S      | 26 +++++++++++++++++++++++++-
>>>   2 files changed, 41 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
>>> index ca11ff7..c84a56d 100644
>>> --- a/arch/arm64/kernel/cpu_errata.c
>>> +++ b/arch/arm64/kernel/cpu_errata.c
>>> @@ -336,6 +336,22 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
>>>   		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
>>>   }
>>>   
>>> +void __init arm64_restore_ssbs_state(struct alt_instr *alt,
>>> +				     __le32 *origptr, __le32 *updptr,
>>> +				     int nr_inst)
>>> +{
>>> +	BUG_ON(nr_inst != 1);
>>> +	/*
>>> +	 * Only restore EL0 SSBS state on EL1 entry if cpu does not
>>> +	 * support the capability and capability is present for at
>>> +	 * least one cpu and if the SSBD state allows it to
>>> +	 * be changed.
>>> +	 */
>>> +	if (!this_cpu_has_cap(ARM64_SSBS) && cpus_have_cap(ARM64_SSBS) &&
>>> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
>>> +		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
>>> +}
>>> +
>>>   void arm64_set_ssbd_mitigation(bool state)
>>>   {
>>>   	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
>>> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
>>> index 9cdc459..7e79305 100644
>>> --- a/arch/arm64/kernel/entry.S
>>> +++ b/arch/arm64/kernel/entry.S
>>> @@ -143,6 +143,25 @@ alternative_cb_end
>>>   #endif
>>>   	.endm
>>>   
>>> +	// This macro updates spsr. It also corrupts the condition
>>> +	// codes state.
>>> +	.macro	restore_ssbs_state, saved_spsr, tmp
>>> +#ifdef CONFIG_ARM64_SSBD
>>> +alternative_cb	arm64_restore_ssbs_state
>>> +	b	.L__asm_ssbs_skip\@
>>> +alternative_cb_end
>>> +	ldr	\tmp, [tsk, #TSK_TI_FLAGS]
>>> +	tbnz	\tmp, #TIF_SSBD, .L__asm_ssbs_skip\@
>>> +	tst	\saved_spsr, #PSR_MODE32_BIT	// native task?
>>> +	b.ne	.L__asm_ssbs_compat\@
>>> +	orr	\saved_spsr, \saved_spsr, #PSR_SSBS_BIT
>>> +	b	.L__asm_ssbs_skip\@
>>> +.L__asm_ssbs_compat\@:
>>> +	orr	\saved_spsr, \saved_spsr, #PSR_AA32_SSBS_BIT
>>> +.L__asm_ssbs_skip\@:
>>> +#endif
>>> +	.endm
>> Although this is in keeping with the rest of entry.S (perfectly
>> unreadable ;-), I think we can do something a bit simpler, that
>> doesn't rely on patching. Also, this doesn't seem to take the
>> SSBD options such as ARM64_SSBD_FORCE_ENABLE into account.
> 
> arm64_restore_ssbs_state has a check for ARM64_SSBD_FORCE_ENABLE;
> does that look wrong?

No, I just focused on the rest of the asm code and missed it, apologies.

> 
>>
>>> +
>>>   	.macro	kernel_entry, el, regsize = 64
>>>   	.if	\regsize == 32
>>>   	mov	w0, w0				// zero upper 32 bits of x0
>>> @@ -182,8 +201,13 @@ alternative_cb_end
>>>   	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
>>>   	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
>>>   	.endif /* \el == 0 */
>>> -	mrs	x22, elr_el1
>>>   	mrs	x23, spsr_el1
>>> +
>>> +	.if	\el == 0
>>> +	restore_ssbs_state x23, x22
>>> +	.endif
>>> +
>>> +	mrs	x22, elr_el1
>>>   	stp	lr, x21, [sp, #S_LR]
>>>   
>>>   	/*
>>>
>> How about the patch below?
> 
> Looks good; I was just going to mention the PF_KTHREAD check, but
> Mark R. has already given detailed information about it.

Yup, well spotted. I'll respin it shortly and we can then work out
whether that's really a better approach.

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...


* Re: [PATCH] arm64: Explicitly set pstate.ssbs for el0 on kernel entry
From: Neeraj Upadhyay @ 2019-07-19  4:53 UTC
  To: Marc Zyngier, will, mark.rutland, julien.thierry, tglx
  Cc: tsoni, parthd, gkohli, linux-arm-msm, linux-kernel, linux-arm-kernel

Hi Marc,


On 7/9/19 7:52 PM, Marc Zyngier wrote:
> On 09/07/2019 15:18, Neeraj Upadhyay wrote:
>> Hi Marc,
>>
>> On 7/9/19 6:38 PM, Marc Zyngier wrote:
>>> Hi Neeraj,
>>>
>>> On 09/07/2019 12:22, Neeraj Upadhyay wrote:
>>>> For CPUs which do not support the pstate.ssbs feature,
>>>> spsr.ssbs might not be retained for el0 (PSTATE[12] is RES0
>>>> on such CPUs). This is problematic if the task later
>>>> migrates to a CPU which does support the feature and relies
>>>> on the saved state being correct. On kernel entry,
>>>> explicitly set spsr.ssbs, so that speculation is enabled
>>>> for el0 when the task migrates to a CPU supporting the ssbs
>>>> feature. Restoring the state at kernel entry ensures that
>>>> the el0 ssbs state is always consistent while we are in el1.
>>>>
>>>> As alternatives are applied by the boot CPU at the end of
>>>> smp init, the presence or absence of the ssbs feature on
>>>> the boot CPU is used to decide whether the capability is
>>>> provided uniformly.
>>> I've seen the same issue, but went for a slightly different
>>> approach, see below.
>>>
>>>> Signed-off-by: Neeraj Upadhyay <neeraju@codeaurora.org>
>>>> ---
>>>>    arch/arm64/kernel/cpu_errata.c | 16 ++++++++++++++++
>>>>    arch/arm64/kernel/entry.S      | 26 +++++++++++++++++++++++++-
>>>>    2 files changed, 41 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
>>>> index ca11ff7..c84a56d 100644
>>>> --- a/arch/arm64/kernel/cpu_errata.c
>>>> +++ b/arch/arm64/kernel/cpu_errata.c
>>>> @@ -336,6 +336,22 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
>>>>    		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
>>>>    }
>>>>    
>>>> +void __init arm64_restore_ssbs_state(struct alt_instr *alt,
>>>> +				     __le32 *origptr, __le32 *updptr,
>>>> +				     int nr_inst)
>>>> +{
>>>> +	BUG_ON(nr_inst != 1);
>>>> +	/*
>>>> +	 * Only restore EL0 SSBS state on EL1 entry if cpu does not
>>>> +	 * support the capability and capability is present for at
>>>> +	 * least one cpu and if the SSBD state allows it to
>>>> +	 * be changed.
>>>> +	 */
>>>> +	if (!this_cpu_has_cap(ARM64_SSBS) && cpus_have_cap(ARM64_SSBS) &&
>>>> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
>>>> +		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
>>>> +}
>>>> +
>>>>    void arm64_set_ssbd_mitigation(bool state)
>>>>    {
>>>>    	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
>>>> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
>>>> index 9cdc459..7e79305 100644
>>>> --- a/arch/arm64/kernel/entry.S
>>>> +++ b/arch/arm64/kernel/entry.S
>>>> @@ -143,6 +143,25 @@ alternative_cb_end
>>>>    #endif
>>>>    	.endm
>>>>    
>>>> +	// This macro updates spsr. It also corrupts the condition
>>>> +	// codes state.
>>>> +	.macro	restore_ssbs_state, saved_spsr, tmp
>>>> +#ifdef CONFIG_ARM64_SSBD
>>>> +alternative_cb	arm64_restore_ssbs_state
>>>> +	b	.L__asm_ssbs_skip\@
>>>> +alternative_cb_end
>>>> +	ldr	\tmp, [tsk, #TSK_TI_FLAGS]
>>>> +	tbnz	\tmp, #TIF_SSBD, .L__asm_ssbs_skip\@
>>>> +	tst	\saved_spsr, #PSR_MODE32_BIT	// native task?
>>>> +	b.ne	.L__asm_ssbs_compat\@
>>>> +	orr	\saved_spsr, \saved_spsr, #PSR_SSBS_BIT
>>>> +	b	.L__asm_ssbs_skip\@
>>>> +.L__asm_ssbs_compat\@:
>>>> +	orr	\saved_spsr, \saved_spsr, #PSR_AA32_SSBS_BIT
>>>> +.L__asm_ssbs_skip\@:
>>>> +#endif
>>>> +	.endm
>>> Although this is in keeping with the rest of entry.S (perfectly
>>> unreadable ;-), I think we can do something a bit simpler, that
>>> doesn't rely on patching. Also, this doesn't seem to take the
>>> SSBD options such as ARM64_SSBD_FORCE_ENABLE into account.
>> arm64_restore_ssbs_state has a check for ARM64_SSBD_FORCE_ENABLE;
>> does that look wrong?
> No, I just focused on the rest of the asm code and missed it, apologies.
>
>>>> +
>>>>    	.macro	kernel_entry, el, regsize = 64
>>>>    	.if	\regsize == 32
>>>>    	mov	w0, w0				// zero upper 32 bits of x0
>>>> @@ -182,8 +201,13 @@ alternative_cb_end
>>>>    	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
>>>>    	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
>>>>    	.endif /* \el == 0 */
>>>> -	mrs	x22, elr_el1
>>>>    	mrs	x23, spsr_el1
>>>> +
>>>> +	.if	\el == 0
>>>> +	restore_ssbs_state x23, x22
>>>> +	.endif
>>>> +
>>>> +	mrs	x22, elr_el1
>>>>    	stp	lr, x21, [sp, #S_LR]
>>>>    
>>>>    	/*
>>>>
>>> How about the patch below?
>> Looks good; I was just going to mention the PF_KTHREAD check, but
>> Mark R. has already given detailed information about it.
> Yup, well spotted. I'll respin it shortly and we can then work out
> whether that's really a better approach.

Did you get a chance to recheck it?


Thanks

Neeraj

>
> Thanks,
>
> 	M.

-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of the Code Aurora Forum, hosted by The Linux Foundation


