* [U-Boot] [PATCH] ARM: non-sec: Add spin table reserved memory support
@ 2014-08-07  1:54 Xiubo Li
  2014-08-07 14:57 ` Marc Zyngier
  0 siblings, 1 reply; 3+ messages in thread
From: Xiubo Li @ 2014-08-07  1:54 UTC (permalink / raw)
  To: u-boot

The memory holding the smp_waitloop code section may be corrupted by
the Linux kernel, in which case the secondary cores end up executing
random code and fail to come up. Reserve that memory in the device
tree so the kernel leaves it alone.
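
For reference, a minimal sketch of how the new helper could be wired
into the device-tree fixup path. This is illustrative only and not
part of the patch; the surrounding fixup function is a placeholder,
not an existing U-Boot hook:

/*
 * Illustrative only: call the reservation helper while preparing the
 * kernel's device tree, so the pages holding smp_waitloop are marked
 * reserved before Linux takes ownership of memory.
 */
#include <common.h>
#include <asm/nonsecure.h>

static int example_fdt_fixup(void *blob)
{
	int err;

	err = fdt_add_smp_waitloop_mem_rsv(blob);
	if (err < 0)
		return err;	/* could not add the /memreserve/ entry */

	return 0;
}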

Signed-off-by: Xiubo Li <Li.Xiubo@freescale.com>
---
 arch/arm/cpu/armv7/nonsec_virt.S |  6 ++++++
 arch/arm/include/asm/nonsecure.h | 14 ++++++++++++++
 arch/arm/lib/Makefile            |  4 ++++
 arch/arm/lib/nonsecure.c         | 34 ++++++++++++++++++++++++++++++++++
 4 files changed, 58 insertions(+)
 create mode 100644 arch/arm/include/asm/nonsecure.h
 create mode 100644 arch/arm/lib/nonsecure.c

diff --git a/arch/arm/cpu/armv7/nonsec_virt.S b/arch/arm/cpu/armv7/nonsec_virt.S
index c334a15..e3a62af 100644
--- a/arch/arm/cpu/armv7/nonsec_virt.S
+++ b/arch/arm/cpu/armv7/nonsec_virt.S
@@ -175,6 +175,9 @@ ENTRY(_nonsec_init)
 	bx	lr
 ENDPROC(_nonsec_init)
 
+.globl smp_waitloop_start
+smp_waitloop_start:
+	.word .
 #ifdef CONFIG_SMP_PEN_ADDR
 /* void __weak smp_waitloop(unsigned previous_address); */
 ENTRY(smp_waitloop)
@@ -190,6 +193,9 @@ ENTRY(smp_waitloop)
 ENDPROC(smp_waitloop)
 .weak smp_waitloop
 #endif
+.globl smp_waitloop_end
+smp_waitloop_end:
+	.word .
 
 ENTRY(_switch_to_hyp)
 	mov	r0, lr
diff --git a/arch/arm/include/asm/nonsecure.h b/arch/arm/include/asm/nonsecure.h
new file mode 100644
index 0000000..31a4071
--- /dev/null
+++ b/arch/arm/include/asm/nonsecure.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2014 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#ifndef _ARM_NONSECURE_H_
+#define _ARM_NONSECURE_H_
+
+extern ulong smp_waitloop_start;	/* start of the smp spin-table wait loop */
+extern ulong smp_waitloop_end;		/* end of the smp spin-table wait loop */
+int fdt_add_smp_waitloop_mem_rsv(void *blob);
+
+#endif /* _ARM_NONSECURE_H_ */
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 321997c..fe707e9 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -55,6 +55,10 @@ ifndef CONFIG_ARM64
 obj-y	+= cache-cp15.o
 endif
 
+ifneq ($(CONFIG_ARMV7_NONSEC)$(CONFIG_ARMV7_VIRT),)
+obj-y   += nonsecure.o
+endif
+
 # For EABI conformant tool chains, provide eabi_compat()
 ifneq (,$(findstring -mabi=aapcs-linux,$(PLATFORM_CPPFLAGS)))
 extra-y	+= eabi_compat.o
diff --git a/arch/arm/lib/nonsecure.c b/arch/arm/lib/nonsecure.c
new file mode 100644
index 0000000..2a83669
--- /dev/null
+++ b/arch/arm/lib/nonsecure.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2014 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+#include <common.h>
+#include <asm/nonsecure.h>
+#include <libfdt.h>
+
+#define PG_4K_ALIGN	(~((1UL << 12) - 1))
+#define PG_4K		(1UL << 12)
+
+int fdt_add_smp_waitloop_mem_rsv(void *blob)
+{
+	unsigned long rsv_start = smp_waitloop_start & PG_4K_ALIGN;
+	unsigned long rsv_end = smp_waitloop_end & PG_4K_ALIGN;
+	unsigned long rsv_size;
+	int off;
+
+	if (smp_waitloop_end == smp_waitloop_start + 0x4)
+		return 0;
+
+	if (rsv_start != rsv_end)
+		rsv_size = 2 * PG_4K;
+	else
+		rsv_size = PG_4K;
+
+	off = fdt_add_mem_rsv(blob, rsv_start, (u64)rsv_size);
+	if (off < 0)
+		printf("Failed to reserve memory for smp waitloop: %s\n",
+				fdt_strerror(off));
+
+	return off;
+}
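
To make the page accounting above concrete, here is a small standalone
sketch with made-up marker addresses (illustrative only, not part of
the patch; the addresses are invented):

#include <stdio.h>

#define PG_4K_ALIGN	(~((1UL << 12) - 1))
#define PG_4K		(1UL << 12)

int main(void)
{
	/* Invented addresses for the start/end markers. */
	unsigned long start = 0x8ff01234UL;
	unsigned long end   = 0x8ff01250UL;
	unsigned long rsv_start = start & PG_4K_ALIGN;
	unsigned long rsv_size =
		(rsv_start != (end & PG_4K_ALIGN)) ? 2 * PG_4K : PG_4K;

	/*
	 * Both markers fall in the same 4 KiB page, so one page suffices;
	 * a range straddling a page boundary would reserve two pages.
	 */
	printf("reserve 0x%lx, size 0x%lx\n", rsv_start, rsv_size);
	return 0;
}
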
-- 
1.8.5


* [U-Boot] [PATCH] ARM: non-sec: Add spin table reserved memory support
  2014-08-07  1:54 [U-Boot] [PATCH] ARM: non-sec: Add spin table reserved memory support Xiubo Li
@ 2014-08-07 14:57 ` Marc Zyngier
  2014-08-08  1:47   ` Li.Xiubo@freescale.com
  0 siblings, 1 reply; 3+ messages in thread
From: Marc Zyngier @ 2014-08-07 14:57 UTC (permalink / raw)
  To: u-boot

On 07/08/14 02:54, Xiubo Li wrote:
> The memory holding the smp_waitloop code section may be corrupted by
> the Linux kernel, in which case the secondary cores end up executing
> random code and fail to come up. Reserve that memory in the device
> tree so the kernel leaves it alone.

There is now similar reservation code in virt-dt.c. Probably some form
of consolidation is in order.
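
For illustration, one possible shape of a shared helper both call
sites could use: a generic routine that rounds an arbitrary
[start, end) range out to whole 4 KiB pages and adds it to the
/memreserve/ list. The function name and placement are hypothetical;
only fdt_add_mem_rsv() and fdt_strerror() are existing libfdt calls.

#include <common.h>
#include <libfdt.h>

/*
 * Hypothetical consolidation helper: reserve every whole 4 KiB page
 * touched by [start, end), so the spin-table code and any other
 * boot-time region could share one implementation.
 */
static int fdt_reserve_range(void *blob, unsigned long start,
			     unsigned long end)
{
	unsigned long rsv_start = start & ~0xfffUL;
	unsigned long rsv_end = (end + 0xfffUL) & ~0xfffUL;
	int err;

	if (end <= start)
		return 0;	/* empty range, nothing to reserve */

	err = fdt_add_mem_rsv(blob, rsv_start, rsv_end - rsv_start);
	if (err < 0)
		printf("Failed to add /memreserve/ entry: %s\n",
		       fdt_strerror(err));

	return err;
}

The spin-table markers added by this patch could then simply be passed
through such a routine.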

Thanks,

	M.

-- 
Jazz is not dead. It just smells funny...


* [U-Boot] [PATCH] ARM: non-sec: Add spin table reserved memory support
  2014-08-07 14:57 ` Marc Zyngier
@ 2014-08-08  1:47   ` Li.Xiubo@freescale.com
  0 siblings, 0 replies; 3+ messages in thread
From: Li.Xiubo@freescale.com @ 2014-08-08  1:47 UTC (permalink / raw)
  To: u-boot

> Subject: Re: [PATCH] ARM: non-sec: Add spin table reserved memory support
> 
> On 07/08/14 02:54, Xiubo Li wrote:
> > The memory holding the smp_waitloop code section may be corrupted by
> > the Linux kernel, in which case the secondary cores end up executing
> > random code and fail to come up. Reserve that memory in the device
> > tree so the kernel leaves it alone.
> 
> There is now similar reservation code in virt-dt.c. Probably some form
> of consolidation is in order.
> 
> Thanks,
> 
> 	M.
> 	

That's good, I will check that.

Thanks,

BRs
Xiubo
