linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/3] MIPS: Do not flush tlb page when updating PTE entry
@ 2020-05-15  4:10 Bibo Mao
  2020-05-15  4:10 ` [PATCH 2/3] mm/memory.c: Update local TLB if PTE entry exists Bibo Mao
  2020-05-15  4:10 ` [PATCH 3/3] mm/memory.c: Add memory read privilege before filling PTE entry Bibo Mao
  0 siblings, 2 replies; 5+ messages in thread
From: Bibo Mao @ 2020-05-15  4:10 UTC (permalink / raw)
  To: Thomas Bogendoerfer, Jiaxun Yang, Huacai Chen, Andrew Morton,
	Paul Burton, Dmitry Korotin, Philippe Mathieu-Daudé,
	Stafford Horne, Steven Price, Anshuman Khandual
  Cc: linux-mips, linux-kernel, Mike Rapoport, Sergei Shtylyov,
	Maciej W. Rozycki, linux-mm

It is not necessary to flush a TLB page on all CPUs if a suitable PTE
entry already exists during page fault handling; just updating the
local TLB is fine.

Here, redefine flush_tlb_fix_spurious_fault as an empty operation on the MIPS system.
---
Change in v2:
- split flush_tlb_fix_spurious_fault and tlb update into two patches
- comments typo modification

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/mips/include/asm/pgtable.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index aab0ec1..e215542 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -454,6 +454,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 	return __pgprot(prot);
 }
 
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 2/3] mm/memory.c: Update local TLB if PTE entry exists
  2020-05-15  4:10 [PATCH 1/3] MIPS: Do not flush tlb page when updating PTE entry Bibo Mao
@ 2020-05-15  4:10 ` Bibo Mao
  2020-05-15 20:40   ` Andrew Morton
  2020-05-15  4:10 ` [PATCH 3/3] mm/memory.c: Add memory read privilege before filling PTE entry Bibo Mao
  1 sibling, 1 reply; 5+ messages in thread
From: Bibo Mao @ 2020-05-15  4:10 UTC (permalink / raw)
  To: Thomas Bogendoerfer, Jiaxun Yang, Huacai Chen, Andrew Morton,
	Paul Burton, Dmitry Korotin, Philippe Mathieu-Daudé,
	Stafford Horne, Steven Price, Anshuman Khandual
  Cc: linux-mips, linux-kernel, Mike Rapoport, Sergei Shtylyov,
	Maciej W. Rozycki, linux-mm

If there are two threads hitting page fault at the same page,
one thread updates PTE entry and local TLB, the other can
update local tlb also, rather than give up and do page fault
again.
---
Change in V2:
- separate tlb update and add pte readable privilege into two patches

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 mm/memory.c | 44 +++++++++++++++++++++++++++-----------------
 1 file changed, 27 insertions(+), 17 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index f703fe8..57748de 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1770,8 +1770,8 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			}
 			entry = pte_mkyoung(*pte);
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
-				update_mmu_cache(vma, addr, pte);
+			ptep_set_access_flags(vma, addr, pte, entry, 1);
+			update_mmu_cache(vma, addr, pte);
 		}
 		goto out_unlock;
 	}
@@ -2436,17 +2436,16 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 			/*
 			 * Other thread has already handled the fault
-			 * and we don't need to do anything. If it's
-			 * not the case, the fault will be triggered
-			 * again on the same address.
+			 * and update local tlb only
 			 */
+			update_mmu_cache(vma, addr, vmf->pte);
 			ret = false;
 			goto pte_unlock;
 		}
 
 		entry = pte_mkyoung(vmf->orig_pte);
-		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
-			update_mmu_cache(vma, addr, vmf->pte);
+		ptep_set_access_flags(vma, addr, vmf->pte, entry, 0);
+		update_mmu_cache(vma, addr, vmf->pte);
 	}
 
 	/*
@@ -2463,7 +2462,8 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
 		locked = true;
 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
-			/* The PTE changed under us. Retry page fault. */
+			/* The PTE changed under us, update local tlb */
+			pdate_mmu_cache(vma, addr, vmf->pte);
 			ret = false;
 			goto pte_unlock;
 		}
@@ -2618,8 +2618,8 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 	entry = pte_mkyoung(vmf->orig_pte);
 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
-		update_mmu_cache(vma, vmf->address, vmf->pte);
+	ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1);
+	update_mmu_cache(vma, vmf->address, vmf->pte);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 }
 
@@ -2752,6 +2752,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		new_page = old_page;
 		page_copied = 1;
 	} else {
+		update_mmu_cache(vma, vmf->address, vmf->pte);
 		mem_cgroup_cancel_charge(new_page, memcg, false);
 	}
 
@@ -2812,6 +2813,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
 	 * pte_offset_map_lock.
 	 */
 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return VM_FAULT_NOPAGE;
 	}
@@ -2936,6 +2938,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 					vmf->address, &vmf->ptl);
 			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+				update_mmu_cache(vma, vmf->address, vmf->pte);
 				unlock_page(vmf->page);
 				pte_unmap_unlock(vmf->pte, vmf->ptl);
 				put_page(vmf->page);
@@ -3341,8 +3344,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 						vma->vm_page_prot));
 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 				vmf->address, &vmf->ptl);
-		if (!pte_none(*vmf->pte))
+		if (!pte_none(*vmf->pte)) {
+			update_mmu_cache(vma, vmf->address, vmf->pte);
 			goto unlock;
+		}
 		ret = check_stable_address_space(vma->vm_mm);
 		if (ret)
 			goto unlock;
@@ -3378,8 +3383,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 			&vmf->ptl);
-	if (!pte_none(*vmf->pte))
+	if (!pte_none(*vmf->pte)) {
+		update_mmu_cache(vma, vmf->address, vmf->pte);
 		goto release;
+	}
 
 	ret = check_stable_address_space(vma->vm_mm);
 	if (ret)
@@ -3646,8 +3653,10 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 	}
 
 	/* Re-check under ptl */
-	if (unlikely(!pte_none(*vmf->pte)))
+	if (unlikely(!pte_none(*vmf->pte))) {
+		update_mmu_cache(vma, vmf->address, vmf->pte);
 		return VM_FAULT_NOPAGE;
+	}
 
 	flush_icache_page(vma, page);
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -4224,18 +4233,18 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
 	spin_lock(vmf->ptl);
 	entry = vmf->orig_pte;
-	if (unlikely(!pte_same(*vmf->pte, entry)))
+	if (unlikely(!pte_same(*vmf->pte, entry))) {
+		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
 		goto unlock;
+	}
 	if (vmf->flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry))
 			return do_wp_page(vmf);
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
+	if (!ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
 				vmf->flags & FAULT_FLAG_WRITE)) {
-		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
-	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
 		 * is not yet telling us if this is a protection fault or not.
@@ -4245,6 +4254,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 		if (vmf->flags & FAULT_FLAG_WRITE)
 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
 	}
+	update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return 0;
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 3/3] mm/memory.c: Add memory read privilege before filling PTE entry
  2020-05-15  4:10 [PATCH 1/3] MIPS: Do not flush tlb page when updating PTE entry Bibo Mao
  2020-05-15  4:10 ` [PATCH 2/3] mm/memory.c: Update local TLB if PTE entry exists Bibo Mao
@ 2020-05-15  4:10 ` Bibo Mao
  1 sibling, 0 replies; 5+ messages in thread
From: Bibo Mao @ 2020-05-15  4:10 UTC (permalink / raw)
  To: Thomas Bogendoerfer, Jiaxun Yang, Huacai Chen, Andrew Morton,
	Paul Burton, Dmitry Korotin, Philippe Mathieu-Daudé,
	Stafford Horne, Steven Price, Anshuman Khandual
  Cc: linux-mips, linux-kernel, Mike Rapoport, Sergei Shtylyov,
	Maciej W. Rozycki, linux-mm

On the MIPS platform, the hardware PTE entry valid bit is set in the
pte_mkyoung function; it is used to grant read privilege on the
physical page.

Here, call pte_mkyoung to make the page readable on the MIPS platform
during page fault handling.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 mm/memory.c   | 3 +++
 mm/mprotect.c | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/mm/memory.c b/mm/memory.c
index 57748de..26e0b8e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2704,6 +2704,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
+		entry = pte_mkyoung(entry);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		/*
 		 * Clear the pte entry and flush it first, before updating the
@@ -3378,6 +3379,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	__SetPageUptodate(page);
 
 	entry = mk_pte(page, vma->vm_page_prot);
+	entry = pte_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3660,6 +3662,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 
 	flush_icache_page(vma, page);
 	entry = mk_pte(page, vma->vm_page_prot);
+	entry = pte_mkyoung(entry);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 	/* copy-on-write page */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 494192ca..673f1cd 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -131,6 +131,8 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				ptent = pte_clear_uffd_wp(ptent);
 			}
 
+			if (vma->vm_flags & VM_READ)
+				ptent = pte_mkyoung(ptent);
 			/* Avoid taking write faults for known dirty pages */
 			if (dirty_accountable && pte_dirty(ptent) &&
 					(pte_soft_dirty(ptent) ||
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH 2/3] mm/memory.c: Update local TLB if PTE entry exists
  2020-05-15  4:10 ` [PATCH 2/3] mm/memory.c: Update local TLB if PTE entry exists Bibo Mao
@ 2020-05-15 20:40   ` Andrew Morton
  2020-05-16  9:42     ` maobibo
  0 siblings, 1 reply; 5+ messages in thread
From: Andrew Morton @ 2020-05-15 20:40 UTC (permalink / raw)
  To: Bibo Mao
  Cc: Thomas Bogendoerfer, Jiaxun Yang, Huacai Chen, Paul Burton,
	Dmitry Korotin, Philippe Mathieu-Daudé,
	Stafford Horne, Steven Price, Anshuman Khandual, linux-mips,
	linux-kernel, Mike Rapoport, Sergei Shtylyov, Maciej W. Rozycki,
	linux-mm

On Fri, 15 May 2020 12:10:08 +0800 Bibo Mao <maobibo@loongson.cn> wrote:

> If there are two threads hitting page fault at the same page,
> one thread updates PTE entry and local TLB, the other can
> update local tlb also, rather than give up and do page fault
> again.
>
> ...
>
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1770,8 +1770,8 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
>  			}
>  			entry = pte_mkyoung(*pte);
>  			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
> -			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
> -				update_mmu_cache(vma, addr, pte);
> +			ptep_set_access_flags(vma, addr, pte, entry, 1);
> +			update_mmu_cache(vma, addr, pte);

Presumably these changes mean that other architectures will run
update_mmu_cache() more frequently than they used to.  How much more
frequently, and what will be the impact of this change?  (Please fully
explain all this in the changelog).

>  		}
>  		goto out_unlock;
>  	}
>
> ...
>
> @@ -2463,7 +2462,8 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
>  		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
>  		locked = true;
>  		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
> -			/* The PTE changed under us. Retry page fault. */
> +			/* The PTE changed under us, update local tlb */
> +			pdate_mmu_cache(vma, addr, vmf->pte);

Missing a 'u' there.  Which tells me this patch isn't the one which you
tested!

>  			ret = false;
>  			goto pte_unlock;
>  		}
>
> ...
>

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH 2/3] mm/memory.c: Update local TLB if PTE entry exists
  2020-05-15 20:40   ` Andrew Morton
@ 2020-05-16  9:42     ` maobibo
  0 siblings, 0 replies; 5+ messages in thread
From: maobibo @ 2020-05-16  9:42 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Thomas Bogendoerfer, Jiaxun Yang, Huacai Chen, Paul Burton,
	Dmitry Korotin, Philippe Mathieu-Daudé,
	Stafford Horne, Steven Price, Anshuman Khandual, linux-mips,
	linux-kernel, Mike Rapoport, Sergei Shtylyov, Maciej W. Rozycki,
	linux-mm



On 05/16/2020 04:40 AM, Andrew Morton wrote:
> On Fri, 15 May 2020 12:10:08 +0800 Bibo Mao <maobibo@loongson.cn> wrote:
> 
>> If there are two threads hitting page fault at the same page,
>> one thread updates PTE entry and local TLB, the other can
>> update local tlb also, rather than give up and do page fault
>> again.
>>
>> ...
>>
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -1770,8 +1770,8 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
>>  			}
>>  			entry = pte_mkyoung(*pte);
>>  			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
>> -			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
>> -				update_mmu_cache(vma, addr, pte);
>> +			ptep_set_access_flags(vma, addr, pte, entry, 1);
>> +			update_mmu_cache(vma, addr, pte);
> 
> Presumably these changes mean that other architectures will run
> update_mmu_cache() more frequently than they used to.  How much more
> frequently, and what will be the impact of this change?  (Please fully
> explain all this in the changelog).
> 
It is only useful for those architectures where software can update the TLB. If the function update_mmu_cache is used for other reasons, it will have some impact; I will explain this in the changelog.

>>  		}
>>  		goto out_unlock;
>>  	}
>>
>> ...
>>
>> @@ -2463,7 +2462,8 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
>>  		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
>>  		locked = true;
>>  		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
>> -			/* The PTE changed under us. Retry page fault. */
>> +			/* The PTE changed under us, update local tlb */
>> +			pdate_mmu_cache(vma, addr, vmf->pte);
> 
> Missing a 'u' there.  Which tells me this patch isn't the one which you
> tested!
> 
Sorry about that; I will refresh the patch and fix this obvious typo.

regards
bibo, mao
>>  			ret = false;
>>  			goto pte_unlock;
>>  		}
>>
>> ...
>>


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2020-05-16  9:43 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-15  4:10 [PATCH 1/3] MIPS: Do not flush tlb page when updating PTE entry Bibo Mao
2020-05-15  4:10 ` [PATCH 2/3] mm/memory.c: Update local TLB if PTE entry exists Bibo Mao
2020-05-15 20:40   ` Andrew Morton
2020-05-16  9:42     ` maobibo
2020-05-15  4:10 ` [PATCH 3/3] mm/memory.c: Add memory read privilege before filling PTE entry Bibo Mao

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).