* [PATCH] riscv: Add KASAN_VMALLOC support
@ 2021-02-25  7:42 ` Alexandre Ghiti
  0 siblings, 0 replies; 8+ messages in thread
From: Alexandre Ghiti @ 2021-02-25  7:42 UTC (permalink / raw)
  To: Paul Walmsley, Palmer Dabbelt, Albert Ou, Nylon Chen, Nick Hu,
	Andrey Ryabinin, Alexander Potapenko, Dmitry Vyukov, linux-riscv,
	linux-kernel, kasan-dev
  Cc: Alexandre Ghiti

Populate the top level of the kernel page table to implement KASAN_VMALLOC;
the lower levels are filled dynamically upon memory allocation at runtime.
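
For reference only (not part of this patch): generic KASAN maps each 8 bytes
of kernel address space to one shadow byte, so the shadow of the whole vmalloc
region is far too large to back with real memory up front. The helper used
below to compute that shadow range looks roughly like the generic definition
(KASAN_SHADOW_SCALE_SHIFT and KASAN_SHADOW_OFFSET are the generic KASAN
constants, not introduced by this patch):

	/* Roughly the generic helper: one shadow byte covers
	 * (1 << KASAN_SHADOW_SCALE_SHIFT) == 8 bytes of address space. */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

Only the top-level (pgd) entries of that shadow range are populated here, each
pointing at a freshly allocated, zeroed table; the remaining levels are created
on demand by the generic vmalloc/KASAN code when a vmalloc area is actually
allocated.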

Co-developed-by: Nylon Chen <nylon7@andestech.com>
Signed-off-by: Nylon Chen <nylon7@andestech.com>
Co-developed-by: Nick Hu <nickhu@andestech.com>
Signed-off-by: Nick Hu <nickhu@andestech.com>
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
---
 arch/riscv/Kconfig         |  1 +
 arch/riscv/mm/kasan_init.c | 35 ++++++++++++++++++++++++++++++++++-
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 8eadd1cbd524..3832a537c5d6 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -57,6 +57,7 @@ config RISCV
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if MMU && 64BIT
+	select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KGDB_QXFER_PKT
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 719b6e4d6075..171569df4334 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -142,6 +142,31 @@ static void __init kasan_populate(void *start, void *end)
 	memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
+void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
+{
+	unsigned long next;
+	void *p;
+	pgd_t *pgd_k = pgd_offset_k(vaddr);
+
+	do {
+		next = pgd_addr_end(vaddr, end);
+		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
+			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
+		}
+	} while (pgd_k++, vaddr = next, vaddr != end);
+}
+
+void __init kasan_shallow_populate(void *start, void *end)
+{
+	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+	unsigned long vend = PAGE_ALIGN((unsigned long)end);
+
+	kasan_shallow_populate_pgd(vaddr, vend);
+
+	local_flush_tlb_all();
+}
+
 void __init kasan_init(void)
 {
 	phys_addr_t _start, _end;
@@ -149,7 +174,15 @@ void __init kasan_init(void)
 
 	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
 				    (void *)kasan_mem_to_shadow((void *)
-								VMALLOC_END));
+								VMEMMAP_END));
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_shallow_populate(
+			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+	else
+		kasan_populate_early_shadow(
+			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
 	for_each_mem_range(i, &_start, &_end) {
 		void *start = (void *)_start;
-- 
2.20.1



* Re: [PATCH] riscv: Add KASAN_VMALLOC support
  2021-02-25  7:42 ` Alexandre Ghiti
@ 2021-02-25  7:48   ` Alex Ghiti
  -1 siblings, 0 replies; 8+ messages in thread
From: Alex Ghiti @ 2021-02-25  7:48 UTC (permalink / raw)
  To: Paul Walmsley, Palmer Dabbelt, Albert Ou, Nylon Chen, Nick Hu,
	Andrey Ryabinin, Alexander Potapenko, Dmitry Vyukov, linux-riscv,
	linux-kernel, kasan-dev

On 2/25/21 at 2:42 AM, Alexandre Ghiti wrote:
> Populate the top-level of the kernel page table to implement KASAN_VMALLOC,
> lower levels are filled dynamically upon memory allocation at runtime.
> 
> Co-developed-by: Nylon Chen <nylon7@andestech.com>
> Signed-off-by: Nylon Chen <nylon7@andestech.com>
> Co-developed-by: Nick Hu <nickhu@andestech.com>
> Signed-off-by: Nick Hu <nickhu@andestech.com>
> Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
> ---
>   arch/riscv/Kconfig         |  1 +
>   arch/riscv/mm/kasan_init.c | 35 ++++++++++++++++++++++++++++++++++-
>   2 files changed, 35 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 8eadd1cbd524..3832a537c5d6 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -57,6 +57,7 @@ config RISCV
>   	select HAVE_ARCH_JUMP_LABEL
>   	select HAVE_ARCH_JUMP_LABEL_RELATIVE
>   	select HAVE_ARCH_KASAN if MMU && 64BIT
> +	select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
>   	select HAVE_ARCH_KGDB
>   	select HAVE_ARCH_KGDB_QXFER_PKT
>   	select HAVE_ARCH_MMAP_RND_BITS if MMU
> diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
> index 719b6e4d6075..171569df4334 100644
> --- a/arch/riscv/mm/kasan_init.c
> +++ b/arch/riscv/mm/kasan_init.c
> @@ -142,6 +142,31 @@ static void __init kasan_populate(void *start, void *end)
>   	memset(start, KASAN_SHADOW_INIT, end - start);
>   }
>   
> +void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
> +{
> +	unsigned long next;
> +	void *p;
> +	pgd_t *pgd_k = pgd_offset_k(vaddr);
> +
> +	do {
> +		next = pgd_addr_end(vaddr, end);
> +		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
> +			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> +			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
> +		}
> +	} while (pgd_k++, vaddr = next, vaddr != end);
> +}
> +
> +void __init kasan_shallow_populate(void *start, void *end)
> +{
> +	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
> +	unsigned long vend = PAGE_ALIGN((unsigned long)end);
> +
> +	kasan_shallow_populate_pgd(vaddr, vend);
> +
> +	local_flush_tlb_all();
> +}
> +
>   void __init kasan_init(void)
>   {
>   	phys_addr_t _start, _end;
> @@ -149,7 +174,15 @@ void __init kasan_init(void)
>   
>   	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
>   				    (void *)kasan_mem_to_shadow((void *)
> -								VMALLOC_END));
> +								VMEMMAP_END));
> +	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
> +		kasan_shallow_populate(
> +			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
> +			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
> +	else
> +		kasan_populate_early_shadow(
> +			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
> +			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
>   
>   	for_each_mem_range(i, &_start, &_end) {
>   		void *start = (void *)_start;
> 

Palmer, this commit should replace (if everyone agrees) Nylon and Nick's
commit e178d670f251 ("riscv/kasan: add KASAN_VMALLOC support") that is
already in for-next.

Thanks,

Alex


* Re: [PATCH] riscv: Add KASAN_VMALLOC support
  2021-02-25  7:48   ` Alex Ghiti
@ 2021-02-26  5:32     ` Palmer Dabbelt
  -1 siblings, 0 replies; 8+ messages in thread
From: Palmer Dabbelt @ 2021-02-26  5:32 UTC (permalink / raw)
  To: alex
  Cc: Paul Walmsley, aou, nylon7, nickhu, aryabinin, glider, dvyukov,
	linux-riscv, linux-kernel, kasan-dev

On Wed, 24 Feb 2021 23:48:13 PST (-0800), alex@ghiti.fr wrote:
> On 2/25/21 at 2:42 AM, Alexandre Ghiti wrote:
>> Populate the top-level of the kernel page table to implement KASAN_VMALLOC,
>> lower levels are filled dynamically upon memory allocation at runtime.
>>
>> Co-developed-by: Nylon Chen <nylon7@andestech.com>
>> Signed-off-by: Nylon Chen <nylon7@andestech.com>
>> Co-developed-by: Nick Hu <nickhu@andestech.com>
>> Signed-off-by: Nick Hu <nickhu@andestech.com>
>> Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
>> ---
>>   arch/riscv/Kconfig         |  1 +
>>   arch/riscv/mm/kasan_init.c | 35 ++++++++++++++++++++++++++++++++++-
>>   2 files changed, 35 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
>> index 8eadd1cbd524..3832a537c5d6 100644
>> --- a/arch/riscv/Kconfig
>> +++ b/arch/riscv/Kconfig
>> @@ -57,6 +57,7 @@ config RISCV
>>   	select HAVE_ARCH_JUMP_LABEL
>>   	select HAVE_ARCH_JUMP_LABEL_RELATIVE
>>   	select HAVE_ARCH_KASAN if MMU && 64BIT
>> +	select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
>>   	select HAVE_ARCH_KGDB
>>   	select HAVE_ARCH_KGDB_QXFER_PKT
>>   	select HAVE_ARCH_MMAP_RND_BITS if MMU
>> diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
>> index 719b6e4d6075..171569df4334 100644
>> --- a/arch/riscv/mm/kasan_init.c
>> +++ b/arch/riscv/mm/kasan_init.c
>> @@ -142,6 +142,31 @@ static void __init kasan_populate(void *start, void *end)
>>   	memset(start, KASAN_SHADOW_INIT, end - start);
>>   }
>>
>> +void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
>> +{
>> +	unsigned long next;
>> +	void *p;
>> +	pgd_t *pgd_k = pgd_offset_k(vaddr);
>> +
>> +	do {
>> +		next = pgd_addr_end(vaddr, end);
>> +		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
>> +			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>> +			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
>> +		}
>> +	} while (pgd_k++, vaddr = next, vaddr != end);
>> +}
>> +
>> +void __init kasan_shallow_populate(void *start, void *end)
>> +{
>> +	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
>> +	unsigned long vend = PAGE_ALIGN((unsigned long)end);
>> +
>> +	kasan_shallow_populate_pgd(vaddr, vend);
>> +
>> +	local_flush_tlb_all();
>> +}
>> +
>>   void __init kasan_init(void)
>>   {
>>   	phys_addr_t _start, _end;
>> @@ -149,7 +174,15 @@ void __init kasan_init(void)
>>
>>   	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
>>   				    (void *)kasan_mem_to_shadow((void *)
>> -								VMALLOC_END));
>> +								VMEMMAP_END));
>> +	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
>> +		kasan_shallow_populate(
>> +			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
>> +			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
>> +	else
>> +		kasan_populate_early_shadow(
>> +			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
>> +			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
>>
>>   	for_each_mem_range(i, &_start, &_end) {
>>   		void *start = (void *)_start;
>>
>
> Palmer, this commit should replace (if everyone agrees) Nylon and Nick's
> Commit e178d670f251 ("riscv/kasan: add KASAN_VMALLOC support") that is
> already in for-next.

Sorry, but it's way too late to be rebasing things.  I understand wanting to
keep the history clean, but in this case we're better off having this as an
explicit fix patch -- changing hashes this late in the process messes with all
the testing.

I'm not sure what the issue actually is, so it'd be great if you could send the
fix patch.  If not then LMK and I'll try to figure out what's going on.  Either
way, having the fix will make sure this gets tested properly as whatever's
going on isn't failing for me.


* Re: [PATCH] riscv: Add KASAN_VMALLOC support
  2021-02-26  5:32     ` Palmer Dabbelt
@ 2021-02-26  6:14       ` Alex Ghiti
  -1 siblings, 0 replies; 8+ messages in thread
From: Alex Ghiti @ 2021-02-26  6:14 UTC (permalink / raw)
  To: Palmer Dabbelt
  Cc: aou, nickhu, linux-kernel, kasan-dev, nylon7, glider,
	Paul Walmsley, aryabinin, linux-riscv, dvyukov

Hi Palmer,

On 2/26/21 at 12:32 AM, Palmer Dabbelt wrote:
> On Wed, 24 Feb 2021 23:48:13 PST (-0800), alex@ghiti.fr wrote:
>> On 2/25/21 at 2:42 AM, Alexandre Ghiti wrote:
>>> Populate the top-level of the kernel page table to implement 
>>> KASAN_VMALLOC,
>>> lower levels are filled dynamically upon memory allocation at runtime.
>>>
>>> Co-developed-by: Nylon Chen <nylon7@andestech.com>
>>> Signed-off-by: Nylon Chen <nylon7@andestech.com>
>>> Co-developed-by: Nick Hu <nickhu@andestech.com>
>>> Signed-off-by: Nick Hu <nickhu@andestech.com>
>>> Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
>>> ---
>>>   arch/riscv/Kconfig         |  1 +
>>>   arch/riscv/mm/kasan_init.c | 35 ++++++++++++++++++++++++++++++++++-
>>>   2 files changed, 35 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
>>> index 8eadd1cbd524..3832a537c5d6 100644
>>> --- a/arch/riscv/Kconfig
>>> +++ b/arch/riscv/Kconfig
>>> @@ -57,6 +57,7 @@ config RISCV
>>>       select HAVE_ARCH_JUMP_LABEL
>>>       select HAVE_ARCH_JUMP_LABEL_RELATIVE
>>>       select HAVE_ARCH_KASAN if MMU && 64BIT
>>> +    select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
>>>       select HAVE_ARCH_KGDB
>>>       select HAVE_ARCH_KGDB_QXFER_PKT
>>>       select HAVE_ARCH_MMAP_RND_BITS if MMU
>>> diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
>>> index 719b6e4d6075..171569df4334 100644
>>> --- a/arch/riscv/mm/kasan_init.c
>>> +++ b/arch/riscv/mm/kasan_init.c
>>> @@ -142,6 +142,31 @@ static void __init kasan_populate(void *start, 
>>> void *end)
>>>       memset(start, KASAN_SHADOW_INIT, end - start);
>>>   }
>>>
>>> +void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned 
>>> long end)
>>> +{
>>> +    unsigned long next;
>>> +    void *p;
>>> +    pgd_t *pgd_k = pgd_offset_k(vaddr);
>>> +
>>> +    do {
>>> +        next = pgd_addr_end(vaddr, end);
>>> +        if (pgd_page_vaddr(*pgd_k) == (unsigned 
>>> long)lm_alias(kasan_early_shadow_pmd)) {
>>> +            p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
>>> +            set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
>>> +        }
>>> +    } while (pgd_k++, vaddr = next, vaddr != end);
>>> +}
>>> +
>>> +void __init kasan_shallow_populate(void *start, void *end)
>>> +{
>>> +    unsigned long vaddr = (unsigned long)start & PAGE_MASK;
>>> +    unsigned long vend = PAGE_ALIGN((unsigned long)end);
>>> +
>>> +    kasan_shallow_populate_pgd(vaddr, vend);
>>> +
>>> +    local_flush_tlb_all();
>>> +}
>>> +
>>>   void __init kasan_init(void)
>>>   {
>>>       phys_addr_t _start, _end;
>>> @@ -149,7 +174,15 @@ void __init kasan_init(void)
>>>
>>>       kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
>>>                       (void *)kasan_mem_to_shadow((void *)
>>> -                                VMALLOC_END));
>>> +                                VMEMMAP_END));
>>> +    if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
>>> +        kasan_shallow_populate(
>>> +            (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
>>> +            (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
>>> +    else
>>> +        kasan_populate_early_shadow(
>>> +            (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
>>> +            (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
>>>
>>>       for_each_mem_range(i, &_start, &_end) {
>>>           void *start = (void *)_start;
>>>
>>
>> Palmer, this commit should replace (if everyone agrees) Nylon and Nick's
>> Commit e178d670f251 ("riscv/kasan: add KASAN_VMALLOC support") that is
>> already in for-next.
> 
> Sorry, but it's way too late to be rebasing things.  I can get trying to 
> have
> the history clean, but in this case we're better off having this as an 
> explicit
> fix patch -- changing hashes this late in the process messes with all the
> testing.
> 
> I'm not sure what the issue actually is, so it'd be great if you could 
> send the
> fix patch.  If not then LMK and I'll try to figure out what's going on.  
> Either
> way, having the fix will make sure this gets tested properly as whatever's
> going on isn't failing for me.
> 

Nylon's patch is functional as is, but as I mentioned here 
https://patchwork.kernel.org/project/linux-riscv/patch/20210116055836.22366-2-nylon7@andestech.com/, 
it does unnecessary things (like trying to walk a user page table that 
does not exist at this point in the boot process).
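
For illustration only (roughly how the macros relate): the vmalloc shadow
mappings have to be inserted into the kernel page table, so the walk should
start from init_mm via pgd_offset_k() rather than from any per-process page
table:

	/* pgd_offset_k() walks the kernel page table (init_mm), the only
	 * page table guaranteed to exist this early in boot; roughly: */
	#define pgd_offset_k(address)	pgd_offset(&init_mm, (address))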

Anyway, I will send another patch rebased on top of Nylon's.

Thanks,

Alex


end of thread

Thread overview: 4 messages
2021-02-25  7:42 [PATCH] riscv: Add KASAN_VMALLOC support Alexandre Ghiti
2021-02-25  7:48 ` Alex Ghiti
2021-02-26  5:32   ` Palmer Dabbelt
2021-02-26  6:14     ` Alex Ghiti
