All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] riscv: make update_mmu_cache to support asid
@ 2022-09-04 13:37 ` Jinyu Tang
  0 siblings, 0 replies; 10+ messages in thread
From: Jinyu Tang @ 2022-09-04 13:37 UTC (permalink / raw)
  To: anup, paul.walmsley, palmer, aou, alexandre.ghiti, guoren, heiko,
	akpm, panqinglin2020, tongtiangen, sunnanyong, anshuman.khandual,
	atishp
  Cc: linux-riscv, linux-kernel, falcon, tjytimi

The `update_mmu_cache` function in riscv flushes the tlb cache without asid
information now, which will flush tlbs in other tasks' address space
even if the processor supports asid. So add a new function
`flush_tlb_local_one_page` to flush one local page whether the processor
supports asid or not, for cases that need to flush one local page like
the function `update_mmu_cache`.

Signed-off-by: Jinyu Tang <tjytimi@163.com>
---
RFC V1 -> V2 :
1.Rebased on PATCH9 of IPI improvement series per Anup Patel's
suggestion.
2.Make commit log clearer.

 arch/riscv/include/asm/pgtable.h  |  2 +-
 arch/riscv/include/asm/tlbflush.h |  2 ++
 arch/riscv/mm/tlbflush.c          | 11 +++++++++++
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7ec936910a96..09ccefa6b6c7 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
 	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
 	 */
-	local_flush_tlb_page(address);
+	flush_tlb_local_one_page(vma, address);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 801019381dea..120aeb1c6ecf 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
@@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
+#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 27a7db8eb2c4..0843e1baaf34 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -41,6 +41,17 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
 		local_flush_tlb_all_asid(asid);
 }
 
+void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	if (static_branch_unlikely(&use_asid_allocator)) {
+		unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
+
+		local_flush_tlb_page_asid(addr, asid);
+	} else {
+		local_flush_tlb_page(addr);
+	}
+}
+
 static void __ipi_flush_tlb_all(void *info)
 {
 	local_flush_tlb_all();
-- 
2.30.2


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH v2] riscv: make update_mmu_cache to support asid
@ 2022-09-04 13:37 ` Jinyu Tang
  0 siblings, 0 replies; 10+ messages in thread
From: Jinyu Tang @ 2022-09-04 13:37 UTC (permalink / raw)
  To: anup, paul.walmsley, palmer, aou, alexandre.ghiti, guoren, heiko,
	akpm, panqinglin2020, tongtiangen, sunnanyong, anshuman.khandual,
	atishp
  Cc: linux-riscv, linux-kernel, falcon, tjytimi

The `update_mmu_cache` function in riscv flushes the tlb cache without asid
information now, which will flush tlbs in other tasks' address space
even if the processor supports asid. So add a new function
`flush_tlb_local_one_page` to flush one local page whether the processor
supports asid or not, for cases that need to flush one local page like
the function `update_mmu_cache`.

Signed-off-by: Jinyu Tang <tjytimi@163.com>
---
RFC V1 -> V2 :
1.Rebased on PATCH9 of IPI improvement series per Anup Patel's
suggestion.
2.Make commit log clearer.

 arch/riscv/include/asm/pgtable.h  |  2 +-
 arch/riscv/include/asm/tlbflush.h |  2 ++
 arch/riscv/mm/tlbflush.c          | 11 +++++++++++
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7ec936910a96..09ccefa6b6c7 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
 	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
 	 */
-	local_flush_tlb_page(address);
+	flush_tlb_local_one_page(vma, address);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 801019381dea..120aeb1c6ecf 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
@@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
+#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 27a7db8eb2c4..0843e1baaf34 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -41,6 +41,17 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
 		local_flush_tlb_all_asid(asid);
 }
 
+void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	if (static_branch_unlikely(&use_asid_allocator)) {
+		unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
+
+		local_flush_tlb_page_asid(addr, asid);
+	} else {
+		local_flush_tlb_page(addr);
+	}
+}
+
 static void __ipi_flush_tlb_all(void *info)
 {
 	local_flush_tlb_all();
-- 
2.30.2


^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH v2] riscv: make update_mmu_cache to support asid
  2022-09-04 13:37 ` Jinyu Tang
@ 2022-09-04 13:49   ` Conor.Dooley
  -1 siblings, 0 replies; 10+ messages in thread
From: Conor.Dooley @ 2022-09-04 13:49 UTC (permalink / raw)
  To: tjytimi, anup, paul.walmsley, palmer, aou, alexandre.ghiti,
	guoren, heiko, akpm, panqinglin2020, tongtiangen, sunnanyong,
	anshuman.khandual, atishp
  Cc: linux-riscv, linux-kernel, falcon

On 04/09/2022 14:37, Jinyu Tang wrote:
> The `update_mmu_cache` function in riscv flush tlb cache without asid

FWIW, when referring to functions please put the () at the end.
Makes the changelog more natural to read. You do not need to make
a v3 for that though.

Thanks,
Conor.

> information now, which will flush tlbs in other tasks' address space
> even if processor supports asid. So add a new function
> `flush_tlb_local_one_page` to flush local one page whether processor
> supports asid or not,for cases that need to flush local one page like
> function `update_mmu_cache`.
> 
> Signed-off-by: Jinyu Tang <tjytimi@163.com>
> ---
> RFC V1 -> V2 : 
> 1.Rebased on PATCH9 of IPI imporvement series as Anup Patel
> suggestion. 
> 2.Make commit log more clear.
> 
>  arch/riscv/include/asm/pgtable.h  |  2 +-
>  arch/riscv/include/asm/tlbflush.h |  2 ++
>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>  3 files changed, 14 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index 7ec936910a96..09ccefa6b6c7 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
>  	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
>  	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
>  	 */
> -	local_flush_tlb_page(address);
> +	flush_tlb_local_one_page(vma, address);
>  }
>  
>  static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 801019381dea..120aeb1c6ecf 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>  void flush_tlb_all(void);
>  void flush_tlb_mm(struct mm_struct *mm);
> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>  		     unsigned long end);
> @@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>  
>  #define flush_tlb_all() local_flush_tlb_all()
>  #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
> +#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
>  
>  static inline void flush_tlb_range(struct vm_area_struct *vma,
>  		unsigned long start, unsigned long end)
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 27a7db8eb2c4..0843e1baaf34 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -41,6 +41,17 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
>  		local_flush_tlb_all_asid(asid);
>  }
>  
> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
> +{
> +	if (static_branch_unlikely(&use_asid_allocator)) {
> +		unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
> +
> +		local_flush_tlb_page_asid(addr, asid);
> +	} else {
> +		local_flush_tlb_page(addr);
> +	}
> +}
> +
>  static void __ipi_flush_tlb_all(void *info)
>  {
>  	local_flush_tlb_all();

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2] riscv: make update_mmu_cache to support asid
@ 2022-09-04 13:49   ` Conor.Dooley
  0 siblings, 0 replies; 10+ messages in thread
From: Conor.Dooley @ 2022-09-04 13:49 UTC (permalink / raw)
  To: tjytimi, anup, paul.walmsley, palmer, aou, alexandre.ghiti,
	guoren, heiko, akpm, panqinglin2020, tongtiangen, sunnanyong,
	anshuman.khandual, atishp
  Cc: linux-riscv, linux-kernel, falcon

On 04/09/2022 14:37, Jinyu Tang wrote:
> The `update_mmu_cache` function in riscv flush tlb cache without asid

FWIW, when referring to functions please put the () at the end.
Makes the changelog more natural to read. You do not need to make
a v3 for that though.

Thanks,
Conor.

> information now, which will flush tlbs in other tasks' address space
> even if processor supports asid. So add a new function
> `flush_tlb_local_one_page` to flush local one page whether processor
> supports asid or not,for cases that need to flush local one page like
> function `update_mmu_cache`.
> 
> Signed-off-by: Jinyu Tang <tjytimi@163.com>
> ---
> RFC V1 -> V2 : 
> 1.Rebased on PATCH9 of IPI imporvement series as Anup Patel
> suggestion. 
> 2.Make commit log more clear.
> 
>  arch/riscv/include/asm/pgtable.h  |  2 +-
>  arch/riscv/include/asm/tlbflush.h |  2 ++
>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>  3 files changed, 14 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index 7ec936910a96..09ccefa6b6c7 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
>  	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
>  	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
>  	 */
> -	local_flush_tlb_page(address);
> +	flush_tlb_local_one_page(vma, address);
>  }
>  
>  static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
> index 801019381dea..120aeb1c6ecf 100644
> --- a/arch/riscv/include/asm/tlbflush.h
> +++ b/arch/riscv/include/asm/tlbflush.h
> @@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>  void flush_tlb_all(void);
>  void flush_tlb_mm(struct mm_struct *mm);
> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>  		     unsigned long end);
> @@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>  
>  #define flush_tlb_all() local_flush_tlb_all()
>  #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
> +#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
>  
>  static inline void flush_tlb_range(struct vm_area_struct *vma,
>  		unsigned long start, unsigned long end)
> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
> index 27a7db8eb2c4..0843e1baaf34 100644
> --- a/arch/riscv/mm/tlbflush.c
> +++ b/arch/riscv/mm/tlbflush.c
> @@ -41,6 +41,17 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
>  		local_flush_tlb_all_asid(asid);
>  }
>  
> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
> +{
> +	if (static_branch_unlikely(&use_asid_allocator)) {
> +		unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
> +
> +		local_flush_tlb_page_asid(addr, asid);
> +	} else {
> +		local_flush_tlb_page(addr);
> +	}
> +}
> +
>  static void __ipi_flush_tlb_all(void *info)
>  {
>  	local_flush_tlb_all();
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re:Re: [PATCH v2] riscv: make update_mmu_cache to support asid
  2022-09-04 13:49   ` Conor.Dooley
@ 2022-09-06 10:57     ` Jinyu Tang
  -1 siblings, 0 replies; 10+ messages in thread
From: Jinyu Tang @ 2022-09-06 10:57 UTC (permalink / raw)
  To: conor.dooley
  Cc: akpm, alexandre.ghiti, anshuman.khandual, anup, aou, atishp,
	falcon, guoren, heiko, linux-kernel, linux-riscv, palmer,
	panqinglin2020, paul.walmsley, sunnanyong, tjytimi, tongtiangen

Thanks for your guidance.^ ^

Sincerely yours,
Jinyu

At 2022-09-04 21:49:31, Conor.Dooley@microchip.com wrote:
>On 04/09/2022 14:37, Jinyu Tang wrote:
>> The `update_mmu_cache` function in riscv flush tlb cache without asid
>
>FWIW, when referring to functions please put the () at the end.
>Makes the changelog more natural to read. You do not need to make
>a v3 for that though.
>
>Thanks,
>Conor.
>
>> information now, which will flush tlbs in other tasks' address space
>> even if processor supports asid. So add a new function
>> `flush_tlb_local_one_page` to flush local one page whether processor
>> supports asid or not,for cases that need to flush local one page like
>> function `update_mmu_cache`.
>> 
>> Signed-off-by: Jinyu Tang <tjytimi@163.com>
>> ---
>> RFC V1 -> V2 : 
>> 1.Rebased on PATCH9 of IPI imporvement series as Anup Patel
>> suggestion. 
>> 2.Make commit log more clear.
>> 
>>  arch/riscv/include/asm/pgtable.h  |  2 +-
>>  arch/riscv/include/asm/tlbflush.h |  2 ++
>>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>>  3 files changed, 14 insertions(+), 1 deletion(-)
>> 
>> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
>> index 7ec936910a96..09ccefa6b6c7 100644
>> --- a/arch/riscv/include/asm/pgtable.h
>> +++ b/arch/riscv/include/asm/pgtable.h
>> @@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
>>  	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
>>  	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
>>  	 */
>> -	local_flush_tlb_page(address);
>> +	flush_tlb_local_one_page(vma, address);
>>  }
>>  
>>  static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
>> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
>> index 801019381dea..120aeb1c6ecf 100644
>> --- a/arch/riscv/include/asm/tlbflush.h
>> +++ b/arch/riscv/include/asm/tlbflush.h
>> @@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
>>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>>  void flush_tlb_all(void);
>>  void flush_tlb_mm(struct mm_struct *mm);
>> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
>>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>>  		     unsigned long end);
>> @@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>>  
>>  #define flush_tlb_all() local_flush_tlb_all()
>>  #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
>> +#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
>>  
>>  static inline void flush_tlb_range(struct vm_area_struct *vma,
>>  		unsigned long start, unsigned long end)
>> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
>> index 27a7db8eb2c4..0843e1baaf34 100644
>> --- a/arch/riscv/mm/tlbflush.c
>> +++ b/arch/riscv/mm/tlbflush.c
>> @@ -41,6 +41,17 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
>>  		local_flush_tlb_all_asid(asid);
>>  }
>>  
>> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
>> +{
>> +	if (static_branch_unlikely(&use_asid_allocator)) {
>> +		unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
>> +
>> +		local_flush_tlb_page_asid(addr, asid);
>> +	} else {
>> +		local_flush_tlb_page(addr);
>> +	}
>> +}
>> +
>>  static void __ipi_flush_tlb_all(void *info)
>>  {
>>  	local_flush_tlb_all();


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re:Re: [PATCH v2] riscv: make update_mmu_cache to support asid
@ 2022-09-06 10:57     ` Jinyu Tang
  0 siblings, 0 replies; 10+ messages in thread
From: Jinyu Tang @ 2022-09-06 10:57 UTC (permalink / raw)
  To: conor.dooley
  Cc: akpm, alexandre.ghiti, anshuman.khandual, anup, aou, atishp,
	falcon, guoren, heiko, linux-kernel, linux-riscv, palmer,
	panqinglin2020, paul.walmsley, sunnanyong, tjytimi, tongtiangen

Thanks for your guidance.^ ^

Sincerely yours,
Jinyu

At 2022-09-04 21:49:31, Conor.Dooley@microchip.com wrote:
>On 04/09/2022 14:37, Jinyu Tang wrote:
>> The `update_mmu_cache` function in riscv flush tlb cache without asid
>
>FWIW, when referring to functions please put the () at the end.
>Makes the changelog more natural to read. You do not need to make
>a v3 for that though.
>
>Thanks,
>Conor.
>
>> information now, which will flush tlbs in other tasks' address space
>> even if processor supports asid. So add a new function
>> `flush_tlb_local_one_page` to flush local one page whether processor
>> supports asid or not,for cases that need to flush local one page like
>> function `update_mmu_cache`.
>> 
>> Signed-off-by: Jinyu Tang <tjytimi@163.com>
>> ---
>> RFC V1 -> V2 : 
>> 1.Rebased on PATCH9 of IPI imporvement series as Anup Patel
>> suggestion. 
>> 2.Make commit log more clear.
>> 
>>  arch/riscv/include/asm/pgtable.h  |  2 +-
>>  arch/riscv/include/asm/tlbflush.h |  2 ++
>>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>>  3 files changed, 14 insertions(+), 1 deletion(-)
>> 
>> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
>> index 7ec936910a96..09ccefa6b6c7 100644
>> --- a/arch/riscv/include/asm/pgtable.h
>> +++ b/arch/riscv/include/asm/pgtable.h
>> @@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
>>  	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
>>  	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
>>  	 */
>> -	local_flush_tlb_page(address);
>> +	flush_tlb_local_one_page(vma, address);
>>  }
>>  
>>  static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
>> diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
>> index 801019381dea..120aeb1c6ecf 100644
>> --- a/arch/riscv/include/asm/tlbflush.h
>> +++ b/arch/riscv/include/asm/tlbflush.h
>> @@ -30,6 +30,7 @@ static inline void local_flush_tlb_page(unsigned long addr)
>>  #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
>>  void flush_tlb_all(void);
>>  void flush_tlb_mm(struct mm_struct *mm);
>> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr);
>>  void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
>>  void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
>>  		     unsigned long end);
>> @@ -42,6 +43,7 @@ void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
>>  
>>  #define flush_tlb_all() local_flush_tlb_all()
>>  #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
>> +#define flush_tlb_local_one_page(vma, addr) local_flush_tlb_page(addr)
>>  
>>  static inline void flush_tlb_range(struct vm_area_struct *vma,
>>  		unsigned long start, unsigned long end)
>> diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
>> index 27a7db8eb2c4..0843e1baaf34 100644
>> --- a/arch/riscv/mm/tlbflush.c
>> +++ b/arch/riscv/mm/tlbflush.c
>> @@ -41,6 +41,17 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
>>  		local_flush_tlb_all_asid(asid);
>>  }
>>  
>> +void flush_tlb_local_one_page(struct vm_area_struct *vma, unsigned long addr)
>> +{
>> +	if (static_branch_unlikely(&use_asid_allocator)) {
>> +		unsigned long asid = atomic_long_read(&vma->vm_mm->context.id);
>> +
>> +		local_flush_tlb_page_asid(addr, asid);
>> +	} else {
>> +		local_flush_tlb_page(addr);
>> +	}
>> +}
>> +
>>  static void __ipi_flush_tlb_all(void *info)
>>  {
>>  	local_flush_tlb_all();


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2] riscv: make update_mmu_cache to support asid
  2022-09-04 13:37 ` Jinyu Tang
@ 2022-09-12  7:26   ` Sergey Matyukevich
  -1 siblings, 0 replies; 10+ messages in thread
From: Sergey Matyukevich @ 2022-09-12  7:26 UTC (permalink / raw)
  To: Jinyu Tang
  Cc: anup, paul.walmsley, palmer, aou, alexandre.ghiti, guoren, heiko,
	akpm, panqinglin2020, tongtiangen, sunnanyong, anshuman.khandual,
	atishp, linux-riscv, linux-kernel, falcon

Hi Jinyu,

> The `update_mmu_cache` function in riscv flush tlb cache without asid
> information now, which will flush tlbs in other tasks' address space
> even if processor supports asid. So add a new function
> `flush_tlb_local_one_page` to flush local one page whether processor
> supports asid or not,for cases that need to flush local one page like
> function `update_mmu_cache`.
> 
> Signed-off-by: Jinyu Tang <tjytimi@163.com>
> ---
> RFC V1 -> V2 : 
> 1.Rebased on PATCH9 of IPI imporvement series as Anup Patel
> suggestion. 
> 2.Make commit log more clear.
> 
>  arch/riscv/include/asm/pgtable.h  |  2 +-
>  arch/riscv/include/asm/tlbflush.h |  2 ++
>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>  3 files changed, 14 insertions(+), 1 deletion(-)

Just FYI: I have been looking into the same function w.r.t. its
ASID/SMP handling. In addition to what your patch is doing with ASID,
I posted experimental change following flush_icache_mm approach. That
patch takes into account other concurrently running harts as well as
possible migration to other harts later on, see:

https://lore.kernel.org/linux-riscv/20220829205219.283543-1-geomatsi@gmail.com/

Regards,
Sergey

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH v2] riscv: make update_mmu_cache to support asid
@ 2022-09-12  7:26   ` Sergey Matyukevich
  0 siblings, 0 replies; 10+ messages in thread
From: Sergey Matyukevich @ 2022-09-12  7:26 UTC (permalink / raw)
  To: Jinyu Tang
  Cc: anup, paul.walmsley, palmer, aou, alexandre.ghiti, guoren, heiko,
	akpm, panqinglin2020, tongtiangen, sunnanyong, anshuman.khandual,
	atishp, linux-riscv, linux-kernel, falcon

Hi Jinyu,

> The `update_mmu_cache` function in riscv flush tlb cache without asid
> information now, which will flush tlbs in other tasks' address space
> even if processor supports asid. So add a new function
> `flush_tlb_local_one_page` to flush local one page whether processor
> supports asid or not,for cases that need to flush local one page like
> function `update_mmu_cache`.
> 
> Signed-off-by: Jinyu Tang <tjytimi@163.com>
> ---
> RFC V1 -> V2 : 
> 1.Rebased on PATCH9 of IPI imporvement series as Anup Patel
> suggestion. 
> 2.Make commit log more clear.
> 
>  arch/riscv/include/asm/pgtable.h  |  2 +-
>  arch/riscv/include/asm/tlbflush.h |  2 ++
>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>  3 files changed, 14 insertions(+), 1 deletion(-)

Just FYI: I have been looking into the same function w.r.t. its
ASID/SMP handling. In addition to what your patch is doing with ASID,
I posted experimental change following flush_icache_mm approach. That
patch takes into account other concurrently running harts as well as
possible migration to other harts later on, see:

https://lore.kernel.org/linux-riscv/20220829205219.283543-1-geomatsi@gmail.com/

Regards,
Sergey

_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re:Re: [PATCH v2] riscv: make update_mmu_cache to support asid
  2022-09-12  7:26   ` Sergey Matyukevich
@ 2022-09-18  5:33     ` Jinyu Tang
  -1 siblings, 0 replies; 10+ messages in thread
From: Jinyu Tang @ 2022-09-18  5:33 UTC (permalink / raw)
  To: geomatsi
  Cc: akpm, alexandre.ghiti, anshuman.khandual, anup, aou, atishp,
	falcon, guoren, heiko, linux-kernel, linux-riscv, palmer,
	panqinglin2020, paul.walmsley, sunnanyong, tjytimi, tongtiangen

At 2022-09-12 15:26:00, "Sergey Matyukevich" <geomatsi@gmail.com> wrote:
>Hi Jinyu,
>
>> The `update_mmu_cache` function in riscv flush tlb cache without asid
>> information now, which will flush tlbs in other tasks' address space
>> even if processor supports asid. So add a new function
>> `flush_tlb_local_one_page` to flush local one page whether processor
>> supports asid or not,for cases that need to flush local one page like
>> function `update_mmu_cache`.
>> 
>> Signed-off-by: Jinyu Tang <tjytimi@163.com>
>> ---
>> RFC V1 -> V2 : 
>> 1.Rebased on PATCH9 of IPI imporvement series as Anup Patel
>> suggestion. 
>> 2.Make commit log more clear.
>> 
>>  arch/riscv/include/asm/pgtable.h  |  2 +-
>>  arch/riscv/include/asm/tlbflush.h |  2 ++
>>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>>  3 files changed, 14 insertions(+), 1 deletion(-)
>
>Just FYI: I have been looking into the same function w.r.t. to its
>ASID/SMP handling. In addition to what your patch is doing with ASID,
>I posted experimental change following flush_icache_mm approach. That
>patch takes into account other concurrently running harts as well as
>possible migration to other harts later on, see:
>
>https://lore.kernel.org/linux-riscv/20220829205219.283543-1-geomatsi@gmail.com/
>
>Regards,
>Sergey
Maybe a local flush is enough for this function, because other harts may never use the pte.
And if another hart uses this pte later, it can take a page fault and recover.

Yours,

Jinyu


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re:Re: [PATCH v2] riscv: make update_mmu_cache to support asid
@ 2022-09-18  5:33     ` Jinyu Tang
  0 siblings, 0 replies; 10+ messages in thread
From: Jinyu Tang @ 2022-09-18  5:33 UTC (permalink / raw)
  To: geomatsi
  Cc: akpm, alexandre.ghiti, anshuman.khandual, anup, aou, atishp,
	falcon, guoren, heiko, linux-kernel, linux-riscv, palmer,
	panqinglin2020, paul.walmsley, sunnanyong, tjytimi, tongtiangen

At 2022-09-12 15:26:00, "Sergey Matyukevich" <geomatsi@gmail.com> wrote:
>Hi Jinyu,
>
>> The `update_mmu_cache` function in riscv flush tlb cache without asid
>> information now, which will flush tlbs in other tasks' address space
>> even if processor supports asid. So add a new function
>> `flush_tlb_local_one_page` to flush local one page whether processor
>> supports asid or not,for cases that need to flush local one page like
>> function `update_mmu_cache`.
>> 
>> Signed-off-by: Jinyu Tang <tjytimi@163.com>
>> ---
>> RFC V1 -> V2 : 
>> 1.Rebased on PATCH9 of IPI imporvement series as Anup Patel
>> suggestion. 
>> 2.Make commit log more clear.
>> 
>>  arch/riscv/include/asm/pgtable.h  |  2 +-
>>  arch/riscv/include/asm/tlbflush.h |  2 ++
>>  arch/riscv/mm/tlbflush.c          | 11 +++++++++++
>>  3 files changed, 14 insertions(+), 1 deletion(-)
>
>Just FYI: I have been looking into the same function w.r.t. to its
>ASID/SMP handling. In addition to what your patch is doing with ASID,
>I posted experimental change following flush_icache_mm approach. That
>patch takes into account other concurrently running harts as well as
>possible migration to other harts later on, see:
>
>https://lore.kernel.org/linux-riscv/20220829205219.283543-1-geomatsi@gmail.com/
>
>Regards,
>Sergey
Maybe a local flush is enough for this function, because other harts may never use the pte.
And if another hart uses this pte later, it can take a page fault and recover.

Yours,

Jinyu


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2022-09-18  5:35 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-09-04 13:37 [PATCH v2] riscv: make update_mmu_cache to support asid Jinyu Tang
2022-09-04 13:37 ` Jinyu Tang
2022-09-04 13:49 ` Conor.Dooley
2022-09-04 13:49   ` Conor.Dooley
2022-09-06 10:57   ` Jinyu Tang
2022-09-06 10:57     ` Jinyu Tang
2022-09-12  7:26 ` Sergey Matyukevich
2022-09-12  7:26   ` Sergey Matyukevich
2022-09-18  5:33   ` Jinyu Tang
2022-09-18  5:33     ` Jinyu Tang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.