* [PATCH] arm64: context: Free up kernel ASIDs if KPTI is not in use
@ 2020-01-07 10:28 Vladimir Murzin
From: Vladimir Murzin @ 2020-01-07 10:28 UTC
  To: linux-arm-kernel

We can extend the user ASID space if it turns out that the system does
not require KPTI. We start with the kernel ASIDs reserved, because CPU
caps are not finalized yet, and free them up lazily on the next
rollover once we confirm that KPTI is not in use.
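
(Not part of the patch: a minimal user-space sketch of the bitmap
trick used below. It shows that memset(map, 0xaa, len) marks exactly
the odd-numbered bit positions, i.e. one ASID of each KPTI pair.
NUM_ASIDS and BITS_PER_LONG_HERE are made-up stand-ins for the
kernel's NUM_USER_ASIDS and BITS_PER_LONG.)

	#include <assert.h>
	#include <limits.h>
	#include <string.h>

	#define NUM_ASIDS		256	/* stand-in for NUM_USER_ASIDS */
	#define BITS_PER_LONG_HERE	(sizeof(unsigned long) * CHAR_BIT)

	int main(void)
	{
		unsigned long map[NUM_ASIDS / BITS_PER_LONG_HERE];
		unsigned int i;

		memset(map, 0xaa, sizeof(map));	/* 0xaa = 0b10101010 */

		for (i = 0; i < NUM_ASIDS; i++) {
			int reserved = !!(map[i / BITS_PER_LONG_HERE] &
					  (1UL << (i % BITS_PER_LONG_HERE)));
			/* odd ASIDs come out reserved, even ones stay free */
			assert(reserved == (int)(i & 1));
		}
		return 0;
	}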

Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
---
 arch/arm64/mm/context.c | 38 ++++++++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b5e329f..8ef73e8 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -29,15 +29,9 @@ static cpumask_t tlb_flush_pending;
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 #define asid2idx(asid)		((asid) & ~ASID_MASK)
 #define idx2asid(idx)		asid2idx(idx)
-#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -77,13 +71,33 @@ void verify_cpu_asid_bits(void)
 	}
 }
 
+static void set_kpti_asid_bits(void)
+{
+	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+	/*
+	 * With KPTI, kernel and user ASIDs are allocated in
+	 * pairs; the bottom bit distinguishes the two: if it
+	 * is set, the ASID maps only userspace. 0xaa marks
+	 * every odd bit, reserving one ASID of each pair.
+	 */
+	memset(asid_map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+	if (arm64_kernel_unmapped_at_el0())
+		set_kpti_asid_bits();
+	else
+		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
 static void flush_context(void)
 {
 	int i;
 	u64 asid;
 
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
-	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+	set_reserved_asid_bits();
 
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
@@ -261,6 +275,14 @@ static int asids_init(void)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);
 
+	/*
+	 * We cannot call set_reserved_asid_bits() here because CPU
+	 * caps are not finalized yet, so it is safer to assume KPTI
+	 * and reserve the kernel ASIDs from the beginning.
+	 */
+	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		set_kpti_asid_bits();
+
 	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
 	return 0;
 }
-- 
2.7.4



* Re: [PATCH] arm64: context: Free up kernel ASIDs if KPTI is not in use
  2020-01-07 10:28 [PATCH] arm64: context: Free up kernel ASIDs if KPTI is not in use Vladimir Murzin
@ 2020-01-09 12:08 ` Catalin Marinas
From: Catalin Marinas @ 2020-01-09 12:08 UTC
  To: Vladimir Murzin; +Cc: linux-arm-kernel

On Tue, Jan 07, 2020 at 10:28:03AM +0000, Vladimir Murzin wrote:
> We can extend the user ASID space if it turns out that the system does
> not require KPTI. We start with the kernel ASIDs reserved, because CPU
> caps are not finalized yet, and free them up lazily on the next
> rollover once we confirm that KPTI is not in use.
> 
> Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
> ---
>  arch/arm64/mm/context.c | 38 ++++++++++++++++++++++++++++++--------
>  1 file changed, 30 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
> index b5e329f..8ef73e8 100644
> --- a/arch/arm64/mm/context.c
> +++ b/arch/arm64/mm/context.c
> @@ -29,15 +29,9 @@ static cpumask_t tlb_flush_pending;
>  #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
>  #define ASID_FIRST_VERSION	(1UL << asid_bits)
>  
> -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
> -#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
> -#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
> -#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
> -#else
> -#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
> +#define NUM_USER_ASIDS		ASID_FIRST_VERSION
>  #define asid2idx(asid)		((asid) & ~ASID_MASK)
>  #define idx2asid(idx)		asid2idx(idx)
> -#endif
>  
>  /* Get the ASIDBits supported by the current CPU */
>  static u32 get_cpu_asid_bits(void)
> @@ -77,13 +71,33 @@ void verify_cpu_asid_bits(void)
>  	}
>  }
>  
> +static void set_kpti_asid_bits(void)
> +{
> +	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
> +	/*
> +	 * With KPTI, kernel and user ASIDs are allocated in
> +	 * pairs; the bottom bit distinguishes the two: if it
> +	 * is set, the ASID maps only userspace. 0xaa marks
> +	 * every odd bit, reserving one ASID of each pair.
> +	 */
> +	memset(asid_map, 0xaa, len);
> +}
> +
> +static void set_reserved_asid_bits(void)
> +{
> +	if (arm64_kernel_unmapped_at_el0())
> +		set_kpti_asid_bits();
> +	else
> +		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
> +}
> +
>  static void flush_context(void)
>  {
>  	int i;
>  	u64 asid;
>  
>  	/* Update the list of reserved ASIDs and the ASID bitmap. */
> -	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
> +	set_reserved_asid_bits();
>  
>  	for_each_possible_cpu(i) {
>  		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
> @@ -261,6 +275,14 @@ static int asids_init(void)
>  		panic("Failed to allocate bitmap for %lu ASIDs\n",
>  		      NUM_USER_ASIDS);
>  
> +	/*
> +	 * We cannot call set_reserved_asid_bits() here because CPU
> +	 * caps are not finalized yet, so it is safer to assume KPTI
> +	 * and reserve the kernel ASIDs from the beginning.
> +	 */
> +	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
> +		set_kpti_asid_bits();
> +
>  	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
>  	return 0;
>  }

Even if we don't need KPTI, we still reserve half of the ASIDs until
the first roll-over, but that's fine. I was hoping we could get rid of
IS_ENABLED() and call set_reserved_asid_bits() directly in asids_init()
once the patch below is merged:

https://lore.kernel.org/linux-arm-kernel/20191209181217.44890-5-broonie@kernel.org/

but we can still turn KPTI on during boot even if the primary CPU
didn't have it. Also, deferring asids_init() may not interact well with
the EFI run-time services, which need an ASID.
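
For the record, the simplification I had in mind is roughly the below
(untested sketch; it assumes cpucaps are already finalized by the time
asids_init() runs, which is what the series above would give us):

	static int asids_init(void)
	{
		asid_bits = get_cpu_asid_bits();
		atomic64_set(&asid_generation, ASID_FIRST_VERSION);
		asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
				   sizeof(*asid_map), GFP_KERNEL);
		if (!asid_map)
			panic("Failed to allocate bitmap for %lu ASIDs\n",
			      NUM_USER_ASIDS);

		/*
		 * cpucaps finalized, so we can pick the final bitmap
		 * state here instead of pessimistically assuming KPTI.
		 */
		set_reserved_asid_bits();

		pr_info("ASID allocator initialised with %lu entries\n",
			NUM_USER_ASIDS);
		return 0;
	}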

Anyway, your patch looks fine to me:

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
