All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment
@ 2015-03-26 13:53 ` Zhichang Yuan
  0 siblings, 0 replies; 8+ messages in thread
From: Zhichang Yuan @ 2015-03-26 13:53 UTC (permalink / raw)
  To: Catalin.Marinas, will.deacon
  Cc: linux-arm-kernel, linaro-kernel, linux-kernel, liguozhu,
	wangzhou1, yuanzhichang

From: yuanzhichang <yuanzhichang@hisilicon.com>

In the patch whose title is "add better page protections to arm64"
(commit da141706aea52c1a9fbd28cb8d289b78819f5436), the direct mapping
page table entries for HEAD_TEXT segment were configured as PAGE_KERNEL,
without the executable attribute. But when the secondary CPUs are booting
based on spin-table mechanism, some functions in head.S are needed to run.
Only PAGE_KERNEL doesn't work for this case.
This patch will configure the page attributes as PAGE_KERNEL_EXEC for
HEAD_TEXT segment.

Signed-off-by: Zhichang Yuan <yuanzhichang@hisilicon.com>
---
 arch/arm64/mm/mmu.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c6daaf6..ad08dfd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -305,8 +305,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 	 * for now. This will get more fine grained later once all memory
 	 * is mapped
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	phys_addr_t kernel_x_start = round_down(__pa(_text), SECTION_SIZE);
+	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -315,6 +315,18 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 		create_mapping(start, __phys_to_virt(start),
 			end - start, PAGE_KERNEL);
 	} else {
+	/*
+	 * At this moment, the text segment must reside in valid physical
+	 * memory section range to make sure the text are totally mapped. 
+	 * If mapping from non-section aligned address is support, then
+	 * _text can be used here directly in replace to kernel_x_start.
+	 */
+		phys_addr_t max_left, min_right;
+
+		max_left = max(kernel_x_start, start);
+		min_right = min(kernel_x_end, end);
+		BUG_ON(max_left != kernel_x_start || min_right != kernel_x_end);
+
 		if (start < kernel_x_start)
 			create_mapping(start, __phys_to_virt(start),
 				kernel_x_start - start,
@@ -394,12 +406,12 @@ void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
-		unsigned long aligned_start = round_down(__pa(_stext),
+	if (!IS_ALIGNED((unsigned long)_text, SECTION_SIZE)) {
+		unsigned long aligned_start = round_down(__pa(_text),
 							SECTION_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
-				__pa(_stext) - aligned_start,
+				__pa(_text) - aligned_start,
 				PAGE_KERNEL);
 	}
 
@@ -418,7 +430,7 @@ void mark_rodata_ro(void)
 {
 	create_mapping_late(__pa(_stext), (unsigned long)_stext,
 				(unsigned long)_etext - (unsigned long)_stext,
-				PAGE_KERNEL_EXEC | PTE_RDONLY);
+				(PAGE_KERNEL_EXEC | PTE_RDONLY) & ~PTE_WRITE);
 
 }
 #endif
-- 
1.9.1


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment
@ 2015-03-26 13:53 ` Zhichang Yuan
  0 siblings, 0 replies; 8+ messages in thread
From: Zhichang Yuan @ 2015-03-26 13:53 UTC (permalink / raw)
  To: linux-arm-kernel

From: yuanzhichang <yuanzhichang@hisilicon.com>

In the patch whose title is "add better page protections to arm64"
(commit da141706aea52c1a9fbd28cb8d289b78819f5436), the direct mapping
page table entries for HEAD_TEXT segment were configured as PAGE_KERNEL,
without the executable attribute. But when the secondary CPUs are booting
based on spin-table mechanism, some functions in head.S are needed to run.
Only PAGE_KERNEL doesn't work for this case.
This patch will configure the page attributes as PAGE_KERNEL_EXEC for
HEAD_TEXT segment.

Signed-off-by: Zhichang Yuan <yuanzhichang@hisilicon.com>
---
 arch/arm64/mm/mmu.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c6daaf6..ad08dfd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -305,8 +305,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 	 * for now. This will get more fine grained later once all memory
 	 * is mapped
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	phys_addr_t kernel_x_start = round_down(__pa(_text), SECTION_SIZE);
+	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -315,6 +315,18 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 		create_mapping(start, __phys_to_virt(start),
 			end - start, PAGE_KERNEL);
 	} else {
+	/*
+	 * At this moment, the text segment must reside in valid physical
+	 * memory section range to make sure the text are totally mapped. 
+	 * If mapping from non-section aligned address is support, then
+	 * _text can be used here directly in replace to kernel_x_start.
+	 */
+		phys_addr_t max_left, min_right;
+
+		max_left = max(kernel_x_start, start);
+		min_right = min(kernel_x_end, end);
+		BUG_ON(max_left != kernel_x_start || min_right != kernel_x_end);
+
 		if (start < kernel_x_start)
 			create_mapping(start, __phys_to_virt(start),
 				kernel_x_start - start,
@@ -394,12 +406,12 @@ void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
-		unsigned long aligned_start = round_down(__pa(_stext),
+	if (!IS_ALIGNED((unsigned long)_text, SECTION_SIZE)) {
+		unsigned long aligned_start = round_down(__pa(_text),
 							SECTION_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
-				__pa(_stext) - aligned_start,
+				__pa(_text) - aligned_start,
 				PAGE_KERNEL);
 	}
 
@@ -418,7 +430,7 @@ void mark_rodata_ro(void)
 {
 	create_mapping_late(__pa(_stext), (unsigned long)_stext,
 				(unsigned long)_etext - (unsigned long)_stext,
-				PAGE_KERNEL_EXEC | PTE_RDONLY);
+				(PAGE_KERNEL_EXEC | PTE_RDONLY) & ~PTE_WRITE);
 
 }
 #endif
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment
  2015-03-26 13:53 ` Zhichang Yuan
@ 2015-03-26 14:34   ` Ard Biesheuvel
  -1 siblings, 0 replies; 8+ messages in thread
From: Ard Biesheuvel @ 2015-03-26 14:34 UTC (permalink / raw)
  To: Zhichang Yuan
  Cc: Catalin Marinas, Will Deacon, Linaro Kernel Mailman List,
	Kenneth Lee, linux-kernel, wangzhou1, linux-arm-kernel

On 26 March 2015 at 14:53, Zhichang Yuan <yuanzhichang@hisilicon.com> wrote:
> From: yuanzhichang <yuanzhichang@hisilicon.com>
>
> In the patch whose title is "add better page protections to arm64"
> (commit da141706aea52c1a9fbd28cb8d289b78819f5436), The direct mapping
> page table entries for HEAD_TEXT segment were configured as PAGE_KERNEL,
> without the executable attribute. But when the secondary CPUs are booting
> based on spin-table mechanism, some functions in head.S are needed to run.

Which function is that exactly? If the secondaries need to execute it,
you should probably move it out of .head.text into the normal .text
section instead of changing the protections here

> Only PAGE_KERNEL dosen't work for this case.
> This patch will configure the page attributes as PAGE_KERNEL_EXEC for
> HEAD_TEXT segment.
>
> Signed-off-by: Zhichang Yuan <yuanzhichang@hisilicon.com>
> ---
>  arch/arm64/mm/mmu.c | 24 ++++++++++++++++++------
>  1 file changed, 18 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index c6daaf6..ad08dfd 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -305,8 +305,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>          * for now. This will get more fine grained later once all memory
>          * is mapped
>          */
> -       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
> -       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
> +       phys_addr_t kernel_x_start = round_down(__pa(_text), SECTION_SIZE);
> +       phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
>
>         if (end < kernel_x_start) {
>                 create_mapping(start, __phys_to_virt(start),
> @@ -315,6 +315,18 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>                 create_mapping(start, __phys_to_virt(start),
>                         end - start, PAGE_KERNEL);
>         } else {
> +       /*
> +        * At this moment, the text segment must reside in valid physical
> +        * memory section range to make sure the text are totally mapped.
> +        * If mapping from non-section aligned address is support, then
> +        * _text can be used here directly in replace to kernel_x_start.
> +        */
> +               phys_addr_t max_left, min_right;
> +
> +               max_left = max(kernel_x_start, start);
> +               min_right = min(kernel_x_end, end);
> +               BUG_ON(max_left != kernel_x_start || min_right != kernel_x_end);
> +
>                 if (start < kernel_x_start)
>                         create_mapping(start, __phys_to_virt(start),
>                                 kernel_x_start - start,
> @@ -394,12 +406,12 @@ void __init fixup_executable(void)
>  {
>  #ifdef CONFIG_DEBUG_RODATA
>         /* now that we are actually fully mapped, make the start/end more fine grained */
> -       if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
> -               unsigned long aligned_start = round_down(__pa(_stext),
> +       if (!IS_ALIGNED((unsigned long)_text, SECTION_SIZE)) {
> +               unsigned long aligned_start = round_down(__pa(_text),
>                                                         SECTION_SIZE);
>
>                 create_mapping(aligned_start, __phys_to_virt(aligned_start),
> -                               __pa(_stext) - aligned_start,
> +                               __pa(_text) - aligned_start,
>                                 PAGE_KERNEL);
>         }
>
> @@ -418,7 +430,7 @@ void mark_rodata_ro(void)
>  {
>         create_mapping_late(__pa(_stext), (unsigned long)_stext,
>                                 (unsigned long)_etext - (unsigned long)_stext,
> -                               PAGE_KERNEL_EXEC | PTE_RDONLY);
> +                               (PAGE_KERNEL_EXEC | PTE_RDONLY) & ~PTE_WRITE);
>
>  }
>  #endif
> --
> 1.9.1
>
>
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment
@ 2015-03-26 14:34   ` Ard Biesheuvel
  0 siblings, 0 replies; 8+ messages in thread
From: Ard Biesheuvel @ 2015-03-26 14:34 UTC (permalink / raw)
  To: linux-arm-kernel

On 26 March 2015 at 14:53, Zhichang Yuan <yuanzhichang@hisilicon.com> wrote:
> From: yuanzhichang <yuanzhichang@hisilicon.com>
>
> In the patch whose title is "add better page protections to arm64"
> (commit da141706aea52c1a9fbd28cb8d289b78819f5436), The direct mapping
> page table entries for HEAD_TEXT segment were configured as PAGE_KERNEL,
> without the executable attribute. But when the secondary CPUs are booting
> based on spin-table mechanism, some functions in head.S are needed to run.

Which function is that exactly? If the secondaries need to execute it,
you should probably move it out of .head.text into the normal .text
section instead of changing the protections here

> Only PAGE_KERNEL dosen't work for this case.
> This patch will configure the page attributes as PAGE_KERNEL_EXEC for
> HEAD_TEXT segment.
>
> Signed-off-by: Zhichang Yuan <yuanzhichang@hisilicon.com>
> ---
>  arch/arm64/mm/mmu.c | 24 ++++++++++++++++++------
>  1 file changed, 18 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index c6daaf6..ad08dfd 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -305,8 +305,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>          * for now. This will get more fine grained later once all memory
>          * is mapped
>          */
> -       unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
> -       unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
> +       phys_addr_t kernel_x_start = round_down(__pa(_text), SECTION_SIZE);
> +       phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
>
>         if (end < kernel_x_start) {
>                 create_mapping(start, __phys_to_virt(start),
> @@ -315,6 +315,18 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>                 create_mapping(start, __phys_to_virt(start),
>                         end - start, PAGE_KERNEL);
>         } else {
> +       /*
> +        * At this moment, the text segment must reside in valid physical
> +        * memory section range to make sure the text are totally mapped.
> +        * If mapping from non-section aligned address is support, then
> +        * _text can be used here directly in replace to kernel_x_start.
> +        */
> +               phys_addr_t max_left, min_right;
> +
> +               max_left = max(kernel_x_start, start);
> +               min_right = min(kernel_x_end, end);
> +               BUG_ON(max_left != kernel_x_start || min_right != kernel_x_end);
> +
>                 if (start < kernel_x_start)
>                         create_mapping(start, __phys_to_virt(start),
>                                 kernel_x_start - start,
> @@ -394,12 +406,12 @@ void __init fixup_executable(void)
>  {
>  #ifdef CONFIG_DEBUG_RODATA
>         /* now that we are actually fully mapped, make the start/end more fine grained */
> -       if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
> -               unsigned long aligned_start = round_down(__pa(_stext),
> +       if (!IS_ALIGNED((unsigned long)_text, SECTION_SIZE)) {
> +               unsigned long aligned_start = round_down(__pa(_text),
>                                                         SECTION_SIZE);
>
>                 create_mapping(aligned_start, __phys_to_virt(aligned_start),
> -                               __pa(_stext) - aligned_start,
> +                               __pa(_text) - aligned_start,
>                                 PAGE_KERNEL);
>         }
>
> @@ -418,7 +430,7 @@ void mark_rodata_ro(void)
>  {
>         create_mapping_late(__pa(_stext), (unsigned long)_stext,
>                                 (unsigned long)_etext - (unsigned long)_stext,
> -                               PAGE_KERNEL_EXEC | PTE_RDONLY);
> +                               (PAGE_KERNEL_EXEC | PTE_RDONLY) & ~PTE_WRITE);
>
>  }
>  #endif
> --
> 1.9.1
>
>
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel at lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment
  2015-03-26 13:53 ` Zhichang Yuan
@ 2015-03-26 22:10   ` Mark Rutland
  -1 siblings, 0 replies; 8+ messages in thread
From: Mark Rutland @ 2015-03-26 22:10 UTC (permalink / raw)
  To: Zhichang Yuan
  Cc: Catalin Marinas, Will Deacon, linaro-kernel, liguozhu,
	linux-kernel, wangzhou1, linux-arm-kernel

On Thu, Mar 26, 2015 at 01:53:48PM +0000, Zhichang Yuan wrote:
> From: yuanzhichang <yuanzhichang@hisilicon.com>
> 
> In the patch whose title is "add better page protections to arm64"
> (commit da141706aea52c1a9fbd28cb8d289b78819f5436), The direct mapping
> page table entries for HEAD_TEXT segment were configured as PAGE_KERNEL,
> without the executable attribute. But when the secondary CPUs are booting
> based on spin-table mechanism, some functions in head.S are needed to run.

In mainline today, the only functions I see in head.S before the .section
change (and hence are not executable) are:

* stext
* __vet_fdt
* __create_page_tables
* __mmap_switched

These are never executed by secondary CPUs. So your problem does not seem to be
related to functions falling within HEAD_TEXT -- all other functions in head.S
are placed in .text, and thus will be executable regardless.

If you had a problem with spin-table, then I don't see why it wouldn't also
apply to PSCI -- in both cases we go via secondary_startup before we enable the
MMU.

So I suspect that you have another bug, and some layout change (or difference
in maintenance) is masking that when the better protections are enabled. It's
also possible that we have a bug in the logic updating the page tables.

Have you actually seen an issue, or was this theoretical?

What exactly do you see happen when booting secondary CPUs?

Do you see the issue in mainline?

> Only PAGE_KERNEL dosen't work for this case.
> This patch will configure the page attributes as PAGE_KERNEL_EXEC for
> HEAD_TEXT segment.

I don't see how this should be necessary.  All the text in that section should
only be executed on the first CPU, prior to permissions being applied, and
prior to the MMU being enabled.

> Signed-off-by: Zhichang Yuan <yuanzhichang@hisilicon.com>
> ---
>  arch/arm64/mm/mmu.c | 24 ++++++++++++++++++------
>  1 file changed, 18 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index c6daaf6..ad08dfd 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -305,8 +305,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>  	 * for now. This will get more fine grained later once all memory
>  	 * is mapped
>  	 */
> -	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
> -	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
> +	phys_addr_t kernel_x_start = round_down(__pa(_text), SECTION_SIZE);
> +	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

As mentioned above, none of the text in this section needs to be run with the
MMU on. So I don't think this is necessary.

>  
>  	if (end < kernel_x_start) {
>  		create_mapping(start, __phys_to_virt(start),
> @@ -315,6 +315,18 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>  		create_mapping(start, __phys_to_virt(start),
>  			end - start, PAGE_KERNEL);
>  	} else {
> +	/*
> +	 * At this moment, the text segment must reside in valid physical
> +	 * memory section range to make sure the text are totally mapped. 
> +	 * If mapping from non-section aligned address is support, then
> +	 * _text can be used here directly in replace to kernel_x_start.
> +	 */
> +		phys_addr_t max_left, min_right;
> +
> +		max_left = max(kernel_x_start, start);
> +		min_right = min(kernel_x_end, end);
> +		BUG_ON(max_left != kernel_x_start || min_right != kernel_x_end);

Huh?

Mark.

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment
@ 2015-03-26 22:10   ` Mark Rutland
  0 siblings, 0 replies; 8+ messages in thread
From: Mark Rutland @ 2015-03-26 22:10 UTC (permalink / raw)
  To: linux-arm-kernel

On Thu, Mar 26, 2015 at 01:53:48PM +0000, Zhichang Yuan wrote:
> From: yuanzhichang <yuanzhichang@hisilicon.com>
> 
> In the patch whose title is "add better page protections to arm64"
> (commit da141706aea52c1a9fbd28cb8d289b78819f5436), The direct mapping
> page table entries for HEAD_TEXT segment were configured as PAGE_KERNEL,
> without the executable attribute. But when the secondary CPUs are booting
> based on spin-table mechanism, some functions in head.S are needed to run.

In mainline today, the only functions I see in head.S before the .section
change (and hence are not executable) are:

* stext
* __vet_fdt
* __create_page_tables
* __mmap_switched

These are never executed by secondary CPUs. So your problem does not seem to be
related to functions falling within HEAD_TEXT -- all other functions in head.S
are placed in .text, and thus will be executable regardless.

If you had a problem with spin-table, then I don't see why it wouldn't also
apply to PSCI -- in both cases we go via secondary_startup before we enable the
MMU.

So I suspect that you have another bug, and some layout change (or difference
in maintenance) is masking that when the better protections are enabled. It's
also possible that we have a bug in the logic updating the page tables.

Have you actually seen an issue, or was this theoretical?

What exactly do you see happen when booting secondary CPUs?

Do you see the issue in mainline?

> Only PAGE_KERNEL dosen't work for this case.
> This patch will configure the page attributes as PAGE_KERNEL_EXEC for
> HEAD_TEXT segment.

I don't see how this should be necessary.  All the text in that section should
only be executed on the first CPU, prior to permissions being applied, and
prior to the MMU being enabled.

> Signed-off-by: Zhichang Yuan <yuanzhichang@hisilicon.com>
> ---
>  arch/arm64/mm/mmu.c | 24 ++++++++++++++++++------
>  1 file changed, 18 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index c6daaf6..ad08dfd 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -305,8 +305,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>  	 * for now. This will get more fine grained later once all memory
>  	 * is mapped
>  	 */
> -	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
> -	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
> +	phys_addr_t kernel_x_start = round_down(__pa(_text), SECTION_SIZE);
> +	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

As mentioned above, none of the text in this section needs to be run with the
MMU on. So I don't think this is necessary.

>  
>  	if (end < kernel_x_start) {
>  		create_mapping(start, __phys_to_virt(start),
> @@ -315,6 +315,18 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>  		create_mapping(start, __phys_to_virt(start),
>  			end - start, PAGE_KERNEL);
>  	} else {
> +	/*
> +	 * At this moment, the text segment must reside in valid physical
> +	 * memory section range to make sure the text are totally mapped. 
> +	 * If mapping from non-section aligned address is support, then
> +	 * _text can be used here directly in replace to kernel_x_start.
> +	 */
> +		phys_addr_t max_left, min_right;
> +
> +		max_left = max(kernel_x_start, start);
> +		min_right = min(kernel_x_end, end);
> +		BUG_ON(max_left != kernel_x_start || min_right != kernel_x_end);

Huh?

Mark.

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment
  2015-03-26 22:10   ` Mark Rutland
@ 2015-03-27  5:08     ` yuanzhichang
  -1 siblings, 0 replies; 8+ messages in thread
From: yuanzhichang @ 2015-03-27  5:08 UTC (permalink / raw)
  To: Mark Rutland
  Cc: Catalin Marinas, Will Deacon, linaro-kernel, liguozhu,
	linux-kernel, wangzhou1, linux-arm-kernel

Hi, Mark



On 2015/3/27 6:10, Mark Rutland wrote:
> On Thu, Mar 26, 2015 at 01:53:48PM +0000, Zhichang Yuan wrote:
>> From: yuanzhichang <yuanzhichang@hisilicon.com>
>>
>> In the patch whose title is "add better page protections to arm64"
>> (commit da141706aea52c1a9fbd28cb8d289b78819f5436), The direct mapping
>> page table entries for HEAD_TEXT segment were configured as PAGE_KERNEL,
>> without the executable attribute. But when the secondary CPUs are booting
>> based on spin-table mechanism, some functions in head.S are needed to run.
>
> In mainline today, the only functions I see in head.S before the .section
> change (and hence are not executable) are:
>
> * stext
> * __vet_fdt
> * __create_page_tables
> * __mmap_switched
>
> These are never executed by secondary CPUs. So your problem does not seem to be
> related to functions falling withing HEAD_TEXT -- all other functions in head.S
> are placed in .text, and thus will be executable regardless.
>

Yes. It is my fault. We had not used the new kernel version to do the 
test. The functions needed for secondary CPUs and CPU restarting are not 
put into the text section. I checked the head.S in the source tree for 
our board, and this instruction is not present in head.S:
	.section ".text","ax"

Thank you very much!

Sorry for the disturbance caused by this patch:(

-Zhichang

-

> If you had a problem with spin-table, then I don't see why it wouldn't also
> apply to PSCI -- in both cases we go via secondary_startup before we enable the
> MMU.
>
> So I suspect that you have another bug, and some layout change (or difference
> in maintenance) is masking that when the better protections are enabled. It's
> also possible that we have a bug in the logic updating the page tables.
>
> Have you actually seen an issue, or was this theoretical?
>
> What exactly do you see happen when booting secondary CPUs?
>
> Do you see the issue in mainline?
>
>> Only PAGE_KERNEL dosen't work for this case.
>> This patch will configure the page attributes as PAGE_KERNEL_EXEC for
>> HEAD_TEXT segment.
>
> I don't see how this should be necessary.  All the text in that section should
> only be executed on the first CPU, prior to permissions being applied, and
> prior to the MMU being enabled.
>
>> Signed-off-by: Zhichang Yuan <yuanzhichang@hisilicon.com>
>> ---
>>   arch/arm64/mm/mmu.c | 24 ++++++++++++++++++------
>>   1 file changed, 18 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index c6daaf6..ad08dfd 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -305,8 +305,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>>   	 * for now. This will get more fine grained later once all memory
>>   	 * is mapped
>>   	 */
>> -	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
>> -	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
>> +	phys_addr_t kernel_x_start = round_down(__pa(_text), SECTION_SIZE);
>> +	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
>
> As mentioned above, none of the text in this section needs to be run with the
> MMU on. So I don't think this is necessary.
>
>>
>>   	if (end < kernel_x_start) {
>>   		create_mapping(start, __phys_to_virt(start),
>> @@ -315,6 +315,18 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>>   		create_mapping(start, __phys_to_virt(start),
>>   			end - start, PAGE_KERNEL);
>>   	} else {
>> +	/*
>> +	 * At this moment, the text segment must reside in valid physical
>> +	 * memory section range to make sure the text are totally mapped.
>> +	 * If mapping from non-section aligned address is support, then
>> +	 * _text can be used here directly in replace to kernel_x_start.
>> +	 */
>> +		phys_addr_t max_left, min_right;
>> +
>> +		max_left = max(kernel_x_start, start);
>> +		min_right = min(kernel_x_end, end);
>> +		BUG_ON(max_left != kernel_x_start || min_right != kernel_x_end);
>
> Huh?
>
> Mark.
>
> .
>


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment
@ 2015-03-27  5:08     ` yuanzhichang
  0 siblings, 0 replies; 8+ messages in thread
From: yuanzhichang @ 2015-03-27  5:08 UTC (permalink / raw)
  To: linux-arm-kernel

Hi, Mark



On 2015/3/27 6:10, Mark Rutland wrote:
> On Thu, Mar 26, 2015 at 01:53:48PM +0000, Zhichang Yuan wrote:
>> From: yuanzhichang <yuanzhichang@hisilicon.com>
>>
>> In the patch whose title is "add better page protections to arm64"
>> (commit da141706aea52c1a9fbd28cb8d289b78819f5436), The direct mapping
>> page table entries for HEAD_TEXT segment were configured as PAGE_KERNEL,
>> without the executable attribute. But when the secondary CPUs are booting
>> based on spin-table mechanism, some functions in head.S are needed to run.
>
> In mainline today, the only functions I see in head.S before the .section
> change (and hence are not executable) are:
>
> * stext
> * __vet_fdt
> * __create_page_tables
> * __mmap_switched
>
> These are never executed by secondary CPUs. So your problem does not seem to be
> related to functions falling withing HEAD_TEXT -- all other functions in head.S
> are placed in .text, and thus will be executable regardless.
>

Yes. It is my fault. We had not used the new kernel version to do the 
test. The functions needed for secondary CPUs and CPU restarting are not 
put into the text section. I checked the head.S in the source tree for 
our board, and this instruction is not present in head.S:
	.section ".text","ax"

Thank you very much!

Sorry for the disturbance caused by this patch:(

-Zhichang

-

> If you had a problem with spin-table, then I don't see why it wouldn't also
> apply to PSCI -- in both cases we go via secondary_startup before we enable the
> MMU.
>
> So I suspect that you have another bug, and some layout change (or difference
> in maintenance) is masking that when the better protections are enabled. It's
> also possible that we have a bug in the logic updating the page tables.
>
> Have you actually seen an issue, or was this theoretical?
>
> What exactly do you see happen when booting secondary CPUs?
>
> Do you see the issue in mainline?
>
>> Only PAGE_KERNEL dosen't work for this case.
>> This patch will configure the page attributes as PAGE_KERNEL_EXEC for
>> HEAD_TEXT segment.
>
> I don't see how this should be necessary.  All the text in that section should
> only be executed on the first CPU, prior to permissions being applied, and
> prior to the MMU being enabled.
>
>> Signed-off-by: Zhichang Yuan <yuanzhichang@hisilicon.com>
>> ---
>>   arch/arm64/mm/mmu.c | 24 ++++++++++++++++++------
>>   1 file changed, 18 insertions(+), 6 deletions(-)
>>
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index c6daaf6..ad08dfd 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -305,8 +305,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>>   	 * for now. This will get more fine grained later once all memory
>>   	 * is mapped
>>   	 */
>> -	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
>> -	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
>> +	phys_addr_t kernel_x_start = round_down(__pa(_text), SECTION_SIZE);
>> +	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
>
> As mentioned above, none of the text in this section needs to be run with the
> MMU on. So I don't think this is necessary.
>
>>
>>   	if (end < kernel_x_start) {
>>   		create_mapping(start, __phys_to_virt(start),
>> @@ -315,6 +315,18 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
>>   		create_mapping(start, __phys_to_virt(start),
>>   			end - start, PAGE_KERNEL);
>>   	} else {
>> +	/*
>> +	 * At this moment, the text segment must reside in valid physical
>> +	 * memory section range to make sure the text are totally mapped.
>> +	 * If mapping from non-section aligned address is support, then
>> +	 * _text can be used here directly in replace to kernel_x_start.
>> +	 */
>> +		phys_addr_t max_left, min_right;
>> +
>> +		max_left = max(kernel_x_start, start);
>> +		min_right = min(kernel_x_end, end);
>> +		BUG_ON(max_left != kernel_x_start || min_right != kernel_x_end);
>
> Huh?
>
> Mark.
>
> .
>

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2015-03-27  5:09 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-03-26 13:53 [PATCH v1] arm64:mm: enable the kernel execute attribute for HEAD_TEXT segment Zhichang Yuan
2015-03-26 13:53 ` Zhichang Yuan
2015-03-26 14:34 ` Ard Biesheuvel
2015-03-26 14:34   ` Ard Biesheuvel
2015-03-26 22:10 ` Mark Rutland
2015-03-26 22:10   ` Mark Rutland
2015-03-27  5:08   ` yuanzhichang
2015-03-27  5:08     ` yuanzhichang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.