linux-arm-kernel.lists.infradead.org archive mirror
* [PATCH] arm64: Force SSBS on context switch
@ 2019-07-19 17:16 Marc Zyngier
  2019-07-19 18:16 ` Neeraj Upadhyay
  2019-07-22  5:53 ` Anshuman Khandual
  0 siblings, 2 replies; 5+ messages in thread
From: Marc Zyngier @ 2019-07-19 17:16 UTC (permalink / raw)
  To: Will Deacon, Catalin Marinas, Mark Rutland, Neeraj Upadhyay
  Cc: linux-arm-kernel

On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
where only some of the CPUs implement SSBS, we end up losing track of
the SSBS bit across task migration.

To address this issue, let's force the SSBS bit on context switch.
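
As a point of reference, here is a minimal user-space model of the failure
mode and of what the switch-time fix restores. The PSR_SSBS_BIT value is
assumed from the arm64 uapi headers; this only models the architectural
behaviour and is not kernel code:

  #include <stdio.h>
  #include <stdint.h>

  #define PSR_SSBS_BIT (1UL << 12)    /* PSTATE.SSBS, AArch64 bit 12 (assumed) */

  /* A CPU without SSBS treats PSTATE[12] as RES0, so the bit is not
   * preserved when the task's state is saved on such a CPU. */
  static uint64_t save_pstate(uint64_t pstate, int cpu_has_ssbs)
  {
          return cpu_has_ssbs ? pstate : (pstate & ~PSR_SSBS_BIT);
  }

  int main(void)
  {
          uint64_t pstate = PSR_SSBS_BIT;     /* task started out with SSBS set */

          pstate = save_pstate(pstate, 0);    /* task ran on a non-SSBS CPU */
          printf("after migration: SSBS=%d\n", !!(pstate & PSR_SSBS_BIT));

          pstate |= PSR_SSBS_BIT;             /* what the context-switch hook re-forces */
          printf("after switch:    SSBS=%d\n", !!(pstate & PSR_SSBS_BIT));
          return 0;
  }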

Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/include/asm/processor.h | 14 ++++++++++++--
 arch/arm64/kernel/process.c        | 15 +++++++++++++++
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);
 
 	regs->sp = sp;
 }
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_AA32_SSBS_BIT;
+		set_compat_ssbs_bit(regs);
 
 	regs->compat_sp = sp;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9856395ccdb7..2fb91c8728ae 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -442,6 +442,20 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }
 
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	if (likely(!(next->flags & PF_KTHREAD)) &&
+	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
+	    !test_tsk_thread_flag(next, TIF_SSBD)) {
+		struct pt_regs *regs = task_pt_regs(next);
+
+		if (compat_user_mode(regs))
+			set_compat_ssbs_bit(regs);
+		else if (user_mode(regs))
+			set_ssbs_bit(regs);
+	}
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -471,6 +485,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ptrauth_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
-- 
2.20.1



* Re: [PATCH] arm64: Force SSBS on context switch
  2019-07-19 17:16 [PATCH] arm64: Force SSBS on context switch Marc Zyngier
@ 2019-07-19 18:16 ` Neeraj Upadhyay
  2019-07-22 13:42   ` Marc Zyngier
  2019-07-22  5:53 ` Anshuman Khandual
  1 sibling, 1 reply; 5+ messages in thread
From: Neeraj Upadhyay @ 2019-07-19 18:16 UTC (permalink / raw)
  To: Marc Zyngier, Will Deacon, Catalin Marinas, Mark Rutland; +Cc: linux-arm-kernel

Hi Marc,

Patch looks good to me. Thanks!

On 7/19/19 10:46 PM, Marc Zyngier wrote:
> On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
> where only some of the CPUs implement SSBS, we end up losing track of
> the SSBS bit across task migration.
>
> To address this issue, let's force the SSBS bit on context switch.
>
> Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>   arch/arm64/include/asm/processor.h | 14 ++++++++++++--
>   arch/arm64/kernel/process.c        | 15 +++++++++++++++
>   2 files changed, 27 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
> index fd5b1a4efc70..844e2964b0f5 100644
> --- a/arch/arm64/include/asm/processor.h
> +++ b/arch/arm64/include/asm/processor.h
> @@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
>   		regs->pmr_save = GIC_PRIO_IRQON;
>   }
>   
> +static inline void set_ssbs_bit(struct pt_regs *regs)
> +{
> +	regs->pstate |= PSR_SSBS_BIT;
> +}
> +
> +static inline void set_compat_ssbs_bit(struct pt_regs *regs)
> +{
> +	regs->pstate |= PSR_AA32_SSBS_BIT;
> +}
> +
>   static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>   				unsigned long sp)
>   {
> @@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>   	regs->pstate = PSR_MODE_EL0t;
>   
>   	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> -		regs->pstate |= PSR_SSBS_BIT;
> +		set_ssbs_bit(regs);
>   
>   	regs->sp = sp;
>   }
> @@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
>   #endif
>   
>   	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> -		regs->pstate |= PSR_AA32_SSBS_BIT;
> +		set_compat_ssbs_bit(regs);
>   
>   	regs->compat_sp = sp;
>   }
> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
> index 9856395ccdb7..2fb91c8728ae 100644
> --- a/arch/arm64/kernel/process.c
> +++ b/arch/arm64/kernel/process.c
> @@ -442,6 +442,20 @@ void uao_thread_switch(struct task_struct *next)
>   	}
>   }
>   
> +static void ssbs_thread_switch(struct task_struct *next)
> +{
> +	if (likely(!(next->flags & PF_KTHREAD)) &&
> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
> +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
> +		struct pt_regs *regs = task_pt_regs(next);
> +
> +		if (compat_user_mode(regs))
> +			set_compat_ssbs_bit(regs);
> +		else if (user_mode(regs))
> +			set_ssbs_bit(regs);
> +	}
> +}
> +

Minor: Can also modify copy_thread() to use set_ssbs_bit(childregs)
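
A rough sketch of what that cleanup could look like, assuming copy_thread()
currently sets PSR_SSBS_BIT directly in its ARM64_SSBD_FORCE_DISABLE path
(hypothetical hunk, not taken from this patch):

 	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-		childregs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(childregs);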


Thanks

Neeraj

>   /*
>    * We store our current task in sp_el0, which is clobbered by userspace. Keep a
>    * shadow copy so that we can restore this upon entry from userspace.
> @@ -471,6 +485,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
>   	entry_task_switch(next);
>   	uao_thread_switch(next);
>   	ptrauth_thread_switch(next);
> +	ssbs_thread_switch(next);
>   
>   	/*
>   	 * Complete any pending TLB or cache maintenance on this CPU in case

-- 
QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of the Code Aurora Forum, hosted by The Linux Foundation



* Re: [PATCH] arm64: Force SSBS on context switch
  2019-07-19 17:16 [PATCH] arm64: Force SSBS on context switch Marc Zyngier
  2019-07-19 18:16 ` Neeraj Upadhyay
@ 2019-07-22  5:53 ` Anshuman Khandual
  2019-07-22  7:05   ` Marc Zyngier
  1 sibling, 1 reply; 5+ messages in thread
From: Anshuman Khandual @ 2019-07-22  5:53 UTC (permalink / raw)
  To: Marc Zyngier, Will Deacon, Catalin Marinas, Mark Rutland,
	Neeraj Upadhyay
  Cc: linux-arm-kernel



On 07/19/2019 10:46 PM, Marc Zyngier wrote:
> +static void ssbs_thread_switch(struct task_struct *next)
> +{
> +	if (likely(!(next->flags & PF_KTHREAD)) &&
> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
> +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
> +		struct pt_regs *regs = task_pt_regs(next);
> +
> +		if (compat_user_mode(regs))
> +			set_compat_ssbs_bit(regs);
> +		else if (user_mode(regs))
> +			set_ssbs_bit(regs);
> +	}
> +}

While still trying not to get confused between SSBD/SSBS and the possible
inverted polarity between them, I assume that the cleared TIF_SSBD flag on
the next user thread prompts setting the SSBS bit in its regs->pstate for
later.

Now when this next task moves to a non-supporting CPU, restoring the SSBS
bit from its stored regs->pstate is inconsequential because it's RES0 on
that CPU's pstate. But when the next CPU supports SSBS, it will just enable
it. So the SSBD/SSBS context needs to be moved from the thread flags into
the saved pstate. Is that correct?
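
A compact way to state the polarity I am assuming (illustrative C only, not
kernel code):

  #include <stdbool.h>

  /*
   * TIF_SSBD set   -> the task asked for the mitigation, so PSTATE.SSBS
   *                   is left clear (store bypass stays disabled).
   * TIF_SSBD clear -> no mitigation wanted, so PSTATE.SSBS should be
   *                   forced on (store bypass allowed), unless the
   *                   mitigation is force-enabled system-wide.
   */
  static bool want_ssbs_set(bool tif_ssbd, bool ssbd_force_enabled)
  {
          return !ssbd_force_enabled && !tif_ssbd;
  }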

Also, the commit message should include some description of how the SSBS
bit was getting lost during task migration.


* Re: [PATCH] arm64: Force SSBS on context switch
  2019-07-22  5:53 ` Anshuman Khandual
@ 2019-07-22  7:05   ` Marc Zyngier
  0 siblings, 0 replies; 5+ messages in thread
From: Marc Zyngier @ 2019-07-22  7:05 UTC (permalink / raw)
  To: Anshuman Khandual
  Cc: Mark Rutland, Catalin Marinas, Will Deacon, Neeraj Upadhyay,
	linux-arm-kernel

On Mon, 22 Jul 2019 06:53:24 +0100,
Anshuman Khandual <anshuman.khandual@arm.com> wrote:
> 
> 
> 
> On 07/19/2019 10:46 PM, Marc Zyngier wrote:
> > +static void ssbs_thread_switch(struct task_struct *next)
> > +{
> > +	if (likely(!(next->flags & PF_KTHREAD)) &&
> > +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
> > +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
> > +		struct pt_regs *regs = task_pt_regs(next);
> > +
> > +		if (compat_user_mode(regs))
> > +			set_compat_ssbs_bit(regs);
> > +		else if (user_mode(regs))
> > +			set_ssbs_bit(regs);
> > +	}
> > +}
> 
> While still trying not to get confused between SSBD/SSBS and possible
> inverted polarity between them I assume that the cleared flag TIF_SSBD
> on the next user thread prompts setting SSBS bit on it's regs->pstate
> for later.

You need to read this patch in conjunction with 8f04e8e6e29c, which
gives the required context.

> Now when this next task moves to a non-supporting CPU, restoring SSBS
> bit from it's stored regs->pstate is inconsequential because its RES0
> on that CPU's pstate. But when the next CPU supports SSBS it will just
> enable it. SSBD/SSBS context needs to be moved from thread flags into
> the saved pstate. Is that correct ?

If what you suggest is a migration from a non-SSBS CPU to an SSBS CPU,
then yes, this is correct.

> Also the commit message should include some description about how SSBS
> bit was getting lost during task migration.

I believe it does. If you think it could be improved, please suggest
what you'd like to see there.

Thanks,

	M.

-- 
Jazz is not dead, it just smells funny.


* Re: [PATCH] arm64: Force SSBS on context switch
  2019-07-19 18:16 ` Neeraj Upadhyay
@ 2019-07-22 13:42   ` Marc Zyngier
  0 siblings, 0 replies; 5+ messages in thread
From: Marc Zyngier @ 2019-07-22 13:42 UTC (permalink / raw)
  To: Neeraj Upadhyay, Will Deacon, Catalin Marinas, Mark Rutland
  Cc: linux-arm-kernel

On 19/07/2019 19:16, Neeraj Upadhyay wrote:
> Hi Marc,
> 
> Patch looks good to me. Thanks!
> 
> On 7/19/19 10:46 PM, Marc Zyngier wrote:
>> On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
>> where only some of the CPUs implement SSBS, we end up losing track of
>> the SSBS bit across task migration.
>>
>> To address this issue, let's force the SSBS bit on context switch.
>>
>> Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>>   arch/arm64/include/asm/processor.h | 14 ++++++++++++--
>>   arch/arm64/kernel/process.c        | 15 +++++++++++++++
>>   2 files changed, 27 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
>> index fd5b1a4efc70..844e2964b0f5 100644
>> --- a/arch/arm64/include/asm/processor.h
>> +++ b/arch/arm64/include/asm/processor.h
>> @@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
>>   		regs->pmr_save = GIC_PRIO_IRQON;
>>   }
>>   
>> +static inline void set_ssbs_bit(struct pt_regs *regs)
>> +{
>> +	regs->pstate |= PSR_SSBS_BIT;
>> +}
>> +
>> +static inline void set_compat_ssbs_bit(struct pt_regs *regs)
>> +{
>> +	regs->pstate |= PSR_AA32_SSBS_BIT;
>> +}
>> +
>>   static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>>   				unsigned long sp)
>>   {
>> @@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
>>   	regs->pstate = PSR_MODE_EL0t;
>>   
>>   	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
>> -		regs->pstate |= PSR_SSBS_BIT;
>> +		set_ssbs_bit(regs);
>>   
>>   	regs->sp = sp;
>>   }
>> @@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
>>   #endif
>>   
>>   	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
>> -		regs->pstate |= PSR_AA32_SSBS_BIT;
>> +		set_compat_ssbs_bit(regs);
>>   
>>   	regs->compat_sp = sp;
>>   }
>> diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
>> index 9856395ccdb7..2fb91c8728ae 100644
>> --- a/arch/arm64/kernel/process.c
>> +++ b/arch/arm64/kernel/process.c
>> @@ -442,6 +442,20 @@ void uao_thread_switch(struct task_struct *next)
>>   	}
>>   }
>>   
>> +static void ssbs_thread_switch(struct task_struct *next)
>> +{
>> +	if (likely(!(next->flags & PF_KTHREAD)) &&
>> +	    arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
>> +	    !test_tsk_thread_flag(next, TIF_SSBD)) {
>> +		struct pt_regs *regs = task_pt_regs(next);
>> +
>> +		if (compat_user_mode(regs))
>> +			set_compat_ssbs_bit(regs);
>> +		else if (user_mode(regs))
>> +			set_ssbs_bit(regs);
>> +	}
>> +}
>> +
> 
> Minor: Can also modify copy_thread() to use set_ssbs_bit(childregs)

Sure, this seems like a valuable cleanup. v2 coming up shortly.

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...

