From: Will Deacon <will@kernel.org>
To: Mark Salter <msalter@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org,
	Catalin Marinas <catalin.marinas@arm.com>,
	Mark Rutland <mark.rutland@arm.com>
Subject: Re: [PATCH] arm64: mm: fix runtime fallback to 48-bit VA when 52-bit VA is enabled
Date: Wed, 10 Mar 2021 11:16:49 +0000	[thread overview]
Message-ID: <20210310111649.GA29413@willie-the-truck> (raw)
In-Reply-To: <20210310003216.410037-1-msalter@redhat.com>

On Tue, Mar 09, 2021 at 07:32:16PM -0500, Mark Salter wrote:
> I ran into an early boot soft lockup on a Qualcomm Amberwing using a v5.11
> kernel configured for 52-bit VA. This turned into a panic with a v5.12-rc2
> kernel.
> 
> The problem is that when we fall back to 48-bit VA, idmap_t0sz is not
> updated. Later, the kvm hypervisor uses idmap_t0sz to set its tcr_el2 and
> hangs (v5.11). After commit 1401bef703a4 ("arm64: mm: Always update TCR_EL1
> from __cpu_set_tcr_t0sz()"), the kernel panics when trying to use the idmap
> to call idmap_cpu_replace_ttbr1().
> 
> Oddly, other systems (ThunderX2 and Ampere eMAG) which don't support 52-bit
> VA seem to handle the setting of an unsupported t0sz without any apparent
> problems. Indeed, if one reads back the tcr written with t0sz==12, the
> value read has t0sz==16. Not so with Amberwing.

Nice, you have one of those elusive platforms!
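
For reference, T0SZ encodes the VA range as 64 minus the number of VA bits,
which is why a kernel configured for 52-bit VA programs t0sz==12 while the
48-bit hardware fallback corresponds to t0sz==16. A minimal sketch of that
relationship (illustrative only; EXAMPLE_T0SZ is not the kernel's macro):

	/*
	 * Illustrative sketch of the encoding used by the arm64 TCR_T0SZ()
	 * macro: T0SZ = 64 - (number of VA bits), programmed into TCR_ELx[5:0].
	 *
	 *   64 - 52 = 12   <- what a CONFIG_ARM64_VA_BITS_52 kernel asks for
	 *   64 - 48 = 16   <- what the Amberwing hardware actually supports
	 */
	#define EXAMPLE_T0SZ(va_bits)	(64UL - (va_bits))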

> Fixes: 90ec95cda91a ("arm64: mm: Introduce VA_BITS_MIN")
> Signed-off-by: Mark Salter <msalter@redhat.com>
> ---
>  arch/arm64/kernel/head.S | 10 +++++++++-
>  1 file changed, 9 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 66b0e0b66e31..2bcbbb26292e 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -291,6 +291,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
>  	 */
>  	adrp	x0, idmap_pg_dir
>  	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
> +	mov	x4, TCR_T0SZ(VA_BITS)
>  
>  #ifdef CONFIG_ARM64_VA_BITS_52
>  	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
> @@ -299,6 +300,13 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
>  	cbnz	x6, 1f
>  #endif
>  	mov	x5, #VA_BITS_MIN
> +#ifdef CONFIG_ARM64_VA_BITS_52
> +	mov	x4, TCR_T0SZ(VA_BITS_MIN)
> +	adr_l	x6, idmap_t0sz
> +	str	x4, [x6]
> +	dmb	sy
> +	dc	ivac, x6		// Invalidate potentially stale cache line
> +#endif
>  1:
>  	adr_l	x6, vabits_actual
>  	str	x5, [x6]
> @@ -319,7 +327,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
>  	 */
>  	adrp	x5, __idmap_text_end
>  	clz	x5, x5
> -	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
> +	cmp	x5, x4			// default T0SZ small enough?
>  	b.ge	1f			// .. then skip VA range extension
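
The store to idmap_t0sz above happens while the MMU and caches are still off,
hence the dmb + dc ivac pair: the variable is later read back through
cacheable mappings, e.g. when programming T0SZ via __cpu_set_tcr_t0sz() or
when KVM derives TCR_EL2 from it. A rough sketch of that consumer path
(simplified; the actual helpers live in mmu_context.h and differ a little
between kernel versions):

	#include <linux/types.h>
	#include <asm/barrier.h>
	#include <asm/sysreg.h>

	extern u64 idmap_t0sz;

	/* Simplified consumer-side sketch, not the literal kernel code: a
	 * stale cached copy of idmap_t0sz would end up in TCR_EL1 here. */
	static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
	{
		unsigned long tcr = read_sysreg(tcr_el1);

		tcr &= ~0x3fUL;			/* T0SZ occupies TCR_EL1[5:0] */
		tcr |= t0sz;
		write_sysreg(tcr, tcr_el1);
		isb();
	}

	/* Used when installing the idmap, e.g. before idmap_cpu_replace_ttbr1() */
	#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)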

Could we instead have the default value be 48-bit, and then avoid having
to update the variable in both cases? e.g. something along the lines of
the entirely untested diff below?

Cheers,

Will

--->8

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 66b0e0b66e31..fb795123896f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
         */
        adrp    x5, __idmap_text_end
        clz     x5, x5
-       cmp     x5, TCR_T0SZ(VA_BITS)   // default T0SZ small enough?
+       cmp     x5, TCR_T0SZ(VA_BITS_MIN)       // default T0SZ small enough?
        b.ge    1f                      // .. then skip VA range extension
 
        adr_l   x6, idmap_t0sz
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3802cfbdd20d..4c5603c41870 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,7 +40,7 @@
 #define NO_BLOCK_MAPPINGS      BIT(0)
 #define NO_CONT_MAPPINGS       BIT(1)
 
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
 u64 __section(".mmuoff.data.write") vabits_actual;
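
For clarity, and assuming this reading of the surrounding head.S code is
correct, the net effect of defaulting to VA_BITS_MIN would be:

	/*
	 * - idmap_t0sz starts out as TCR_T0SZ(48) whether or not
	 *   CONFIG_ARM64_VA_BITS_52 is set, so the "configured for 52-bit
	 *   but the hardware only supports 48-bit" fallback no longer needs
	 *   any runtime store at all.
	 * - If __idmap_text_end genuinely needs more than 48 VA bits, the
	 *   existing extension path just after the "b.ge 1f" above
	 *   (adr_l x6, idmap_t0sz; str ...) still bumps the value, as before.
	 */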



Thread overview: 10+ messages
2021-03-10  0:32 [PATCH] arm64: mm: fix runtime fallback to 48-bit VA when 52-bit VA is enabled Mark Salter
2021-03-10 11:16 ` Will Deacon [this message]
2021-03-10 12:41   ` Ard Biesheuvel
2021-03-10 13:29     ` Ard Biesheuvel
2021-03-10 14:08       ` Mark Salter
