diff for duplicates of <20171019140426.21f51957@MiWiFi-R3-srv>

diff --git a/a/1.txt b/N1/1.txt
index d148233..9e38e78 100644
--- a/a/1.txt
+++ b/N1/1.txt
@@ -284,4 +284,10 @@ OK.. so we could get write faults on write accesses from the device.
 Looking at the patchset, I understand the efficiency, but I am concerned
 with correctness.
 
-Balbir Singh.
\ No newline at end of file
+Balbir Singh.
+
+--
+To unsubscribe, send a message with 'unsubscribe linux-mm' in
+the body to majordomo@kvack.org.  For more info on Linux MM,
+see: http://www.linux-mm.org/ .
+Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
\ No newline at end of file
diff --git a/a/content_digest b/N1/content_digest
index 96e6370..1a124b2 100644
--- a/a/content_digest
+++ b/N1/content_digest
@@ -328,7 +328,13 @@
   "Looking at the patchset, I understand the efficiency, but I am concerned\n",
   "with correctness.\n",
   "\n",
-  "Balbir Singh."
+  "Balbir Singh.\n",
+  "\n",
+  "--\n",
+  "To unsubscribe, send a message with 'unsubscribe linux-mm' in\n",
+  "the body to majordomo\@kvack.org.  For more info on Linux MM,\n",
+  "see: http://www.linux-mm.org/ .\n",
+  "Don't email: <a href=mailto:\"dont\@kvack.org\"> email\@kvack.org </a>"
 ]
 
-d22594b2b0752c604dbe0a0dbe19d9fd06d5c5dcbd7429d2f372da6062605366
+7a2dad5c628fbcb3f0489541b021d442202deaca4c1eed44c9c1527e6c368c6b

diff --git a/a/1.txt b/N2/1.txt
index d148233..0f9f7c3 100644
--- a/a/1.txt
+++ b/N2/1.txt
@@ -1,8 +1,8 @@
 On Mon, 16 Oct 2017 23:10:02 -0400
 jglisse@redhat.com wrote:
 
-> From: Jérôme Glisse <jglisse@redhat.com>
-> 
+> From: J=C3=A9r=C3=B4me Glisse <jglisse@redhat.com>
+>=20
 > +		/*
 > +		 * No need to call mmu_notifier_invalidate_range() as we are
 > +		 * downgrading page table protection not changing it to point
@@ -13,20 +13,23 @@ jglisse@redhat.com wrote:
 >  		if (pmdp) {
 >  #ifdef CONFIG_FS_DAX_PMD
 >  			pmd_t pmd;
-> @@ -628,7 +635,6 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
->  			pmd = pmd_wrprotect(pmd);
->  			pmd = pmd_mkclean(pmd);
+> @@ -628,7 +635,6 @@ static void dax_mapping_entry_mkclean(struct address_=
+space *mapping,
+>  			pmd =3D pmd_wrprotect(pmd);
+>  			pmd =3D pmd_mkclean(pmd);
 >  			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
 > -			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 
-Could the secondary TLB still see the mapping as dirty and propagate the dirty bit back?
+Could the secondary TLB still see the mapping as dirty and propagate the di=
+rty bit back?
 
 >  unlock_pmd:
 >  			spin_unlock(ptl);
 >  #endif
-> @@ -643,7 +649,6 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
->  			pte = pte_wrprotect(pte);
->  			pte = pte_mkclean(pte);
+> @@ -643,7 +649,6 @@ static void dax_mapping_entry_mkclean(struct address_=
+space *mapping,
+>  			pte =3D pte_wrprotect(pte);
+>  			pte =3D pte_mkclean(pte);
 >  			set_pte_at(vma->vm_mm, address, ptep, pte);
 > -			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
 
@@ -53,10 +56,11 @@ Ditto
 > index c037d3d34950..ff5bc647b51d 100644
 > --- a/mm/huge_memory.c
 > +++ b/mm/huge_memory.c
-> @@ -1186,8 +1186,15 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
+> @@ -1186,8 +1186,15 @@ static int do_huge_pmd_wp_page_fallback(struct vm_=
+fault *vmf, pmd_t orig_pmd,
 >  		goto out_free_pages;
 >  	VM_BUG_ON_PAGE(!PageHead(page), page);
->  
+> =20
 > +	/*
 > +	 * Leave pmd empty until pte is filled note we must notify here as
 > +	 * concurrent CPU thread might write to new page before the call to
@@ -67,13 +71,14 @@ Ditto
 > +	 */
 >  	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 > -	/* leave pmd empty until pte is filled */
->  
->  	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
+> =20
+>  	pgtable =3D pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
 >  	pmd_populate(vma->vm_mm, &_pmd, pgtable);
-> @@ -2026,8 +2033,15 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+> @@ -2026,8 +2033,15 @@ static void __split_huge_zero_page_pmd(struct vm_a=
+rea_struct *vma,
 >  	pmd_t _pmd;
 >  	int i;
->  
+> =20
 > -	/* leave pmd empty until pte is filled */
 > -	pmdp_huge_clear_flush_notify(vma, haddr, pmd);
 > +	/*
@@ -88,14 +93,15 @@ Ditto
 
 Shouldn't the secondary TLB know if the page size changed?
 
->  
->  	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+> =20
+>  	pgtable =3D pgtable_trans_huge_withdraw(mm, pmd);
 >  	pmd_populate(mm, &_pmd, pgtable);
 > diff --git a/mm/hugetlb.c b/mm/hugetlb.c
 > index 1768efa4c501..63a63f1b536c 100644
 > --- a/mm/hugetlb.c
 > +++ b/mm/hugetlb.c
-> @@ -3254,9 +3254,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+> @@ -3254,9 +3254,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst,=
+ struct mm_struct *src,
 >  			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
 >  		} else {
 >  			if (cow) {
@@ -113,9 +119,10 @@ OK.. so we could get write faults on write accesses from the device.
 > -				mmu_notifier_invalidate_range(src, mmun_start,
 > -								   mmun_end);
 >  			}
->  			entry = huge_ptep_get(src_pte);
->  			ptepage = pte_page(entry);
-> @@ -4288,7 +4293,12 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+>  			entry =3D huge_ptep_get(src_pte);
+>  			ptepage =3D pte_page(entry);
+> @@ -4288,7 +4293,12 @@ unsigned long hugetlb_change_protection(struct vm_=
+area_struct *vma,
 >  	 * and that page table be reused and filled with junk.
 >  	 */
 >  	flush_hugetlb_tlb_range(vma, start, end);
@@ -128,12 +135,13 @@ OK.. so we could get write faults on write accesses from the device.
 > +	 */
 >  	i_mmap_unlock_write(vma->vm_file->f_mapping);
 >  	mmu_notifier_invalidate_range_end(mm, start, end);
->  
+> =20
 > diff --git a/mm/ksm.c b/mm/ksm.c
 > index 6cb60f46cce5..be8f4576f842 100644
 > --- a/mm/ksm.c
 > +++ b/mm/ksm.c
-> @@ -1052,8 +1052,13 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
+> @@ -1052,8 +1052,13 @@ static int write_protect_page(struct vm_area_struc=
+t *vma, struct page *page,
 >  		 * So we clear the pte and flush the tlb before the check
 >  		 * this assure us that no O_DIRECT can happen after the check
 >  		 * or in the middle of the check.
@@ -143,14 +151,15 @@ OK.. so we could get write faults on write accesses from the device.
 > +		 *
 > +		 * See Documentation/vm/mmu_notifier.txt
 >  		 */
-> -		entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
-> +		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
+> -		entry =3D ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
+> +		entry =3D ptep_clear_flush(vma, pvmw.address, pvmw.pte);
 >  		/*
 >  		 * Check that no O_DIRECT or similar I/O is in progress on the
 >  		 * page
-> @@ -1136,7 +1141,13 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
+> @@ -1136,7 +1141,13 @@ static int replace_page(struct vm_area_struct *vma=
+, struct page *page,
 >  	}
->  
+> =20
 >  	flush_cache_page(vma, addr, pte_pfn(*ptep));
 > -	ptep_clear_flush_notify(vma, addr, ptep);
 > +	/*
@@ -161,16 +170,17 @@ OK.. so we could get write faults on write accesses from the device.
 > +	 */
 > +	ptep_clear_flush(vma, addr, ptep);
 >  	set_pte_at_notify(mm, addr, ptep, newpte);
->  
+> =20
 >  	page_remove_rmap(page, false);
 > diff --git a/mm/rmap.c b/mm/rmap.c
 > index 061826278520..6b5a0f219ac0 100644
 > --- a/mm/rmap.c
 > +++ b/mm/rmap.c
-> @@ -937,10 +937,15 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
+> @@ -937,10 +937,15 @@ static bool page_mkclean_one(struct page *page, str=
+uct vm_area_struct *vma,
 >  #endif
 >  		}
->  
+> =20
 > -		if (ret) {
 > -			mmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);
 > +		/*
@@ -184,11 +194,12 @@ OK.. so we could get write faults on write accesses from the device.
 >  			(*cleaned)++;
 > -		}
 >  	}
->  
+> =20
 >  	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
-> @@ -1424,6 +1429,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+> @@ -1424,6 +1429,10 @@ static bool try_to_unmap_one(struct page *page, st=
+ruct vm_area_struct *vma,
 >  			if (pte_soft_dirty(pteval))
->  				swp_pte = pte_swp_mksoft_dirty(swp_pte);
+>  				swp_pte =3D pte_swp_mksoft_dirty(swp_pte);
 >  			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
 > +			/*
 > +			 * No need to invalidate here it will synchronize on
@@ -196,8 +207,9 @@ OK.. so we could get write faults on write accesses from the device.
 > +			 */
 >  			goto discard;
 >  		}
->  
-> @@ -1481,6 +1490,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+> =20
+> @@ -1481,6 +1490,9 @@ static bool try_to_unmap_one(struct page *page, str=
+uct vm_area_struct *vma,
 >  			 * will take care of the rest.
 >  			 */
 >  			dec_mm_counter(mm, mm_counter(page));
@@ -207,27 +219,30 @@ OK.. so we could get write faults on write accesses from the device.
 >  		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
 >  				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
 >  			swp_entry_t entry;
-> @@ -1496,6 +1508,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+> @@ -1496,6 +1508,10 @@ static bool try_to_unmap_one(struct page *page, st=
+ruct vm_area_struct *vma,
 >  			if (pte_soft_dirty(pteval))
->  				swp_pte = pte_swp_mksoft_dirty(swp_pte);
+>  				swp_pte =3D pte_swp_mksoft_dirty(swp_pte);
 >  			set_pte_at(mm, address, pvmw.pte, swp_pte);
 > +			/*
 > +			 * No need to invalidate here it will synchronize on
 > +			 * against the special swap migration pte.
 > +			 */
 >  		} else if (PageAnon(page)) {
->  			swp_entry_t entry = { .val = page_private(subpage) };
+>  			swp_entry_t entry =3D { .val =3D page_private(subpage) };
 >  			pte_t swp_pte;
-> @@ -1507,6 +1523,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+> @@ -1507,6 +1523,8 @@ static bool try_to_unmap_one(struct page *page, str=
+uct vm_area_struct *vma,
 >  				WARN_ON_ONCE(1);
->  				ret = false;
+>  				ret =3D false;
 >  				/* We have to invalidate as we cleared the pte */
 > +				mmu_notifier_invalidate_range(mm, address,
 > +							address + PAGE_SIZE);
 >  				page_vma_mapped_walk_done(&pvmw);
 >  				break;
 >  			}
-> @@ -1514,6 +1532,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+> @@ -1514,6 +1532,9 @@ static bool try_to_unmap_one(struct page *page, str=
+uct vm_area_struct *vma,
 >  			/* MADV_FREE page check */
 >  			if (!PageSwapBacked(page)) {
 >  				if (!PageDirty(page)) {
@@ -237,9 +252,10 @@ OK.. so we could get write faults on write accesses from the device.
 >  					dec_mm_counter(mm, MM_ANONPAGES);
 >  					goto discard;
 >  				}
-> @@ -1547,13 +1568,39 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+> @@ -1547,13 +1568,39 @@ static bool try_to_unmap_one(struct page *page, s=
+truct vm_area_struct *vma,
 >  			if (pte_soft_dirty(pteval))
->  				swp_pte = pte_swp_mksoft_dirty(swp_pte);
+>  				swp_pte =3D pte_swp_mksoft_dirty(swp_pte);
 >  			set_pte_at(mm, address, pvmw.pte, swp_pte);
 > -		} else
 > +			/* Invalidate as we cleared the pte */
@@ -278,7 +294,7 @@ OK.. so we could get write faults on write accesses from the device.
 > -		mmu_notifier_invalidate_range(mm, address,
 > -					      address + PAGE_SIZE);
 >  	}
->  
+> =20
 >  	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
 
 Looking at the patchset, I understand the efficiency, but I am concerned
diff --git a/a/content_digest b/N2/content_digest
index 96e6370..963db75 100644
--- a/a/content_digest
+++ b/N2/content_digest
@@ -45,8 +45,8 @@
   "On Mon, 16 Oct 2017 23:10:02 -0400\n",
   "jglisse\@redhat.com wrote:\n",
   "\n",
-  "> From: J\303\251r\303\264me Glisse <jglisse\@redhat.com>\n",
-  "> \n",
+  "> From: J=C3=A9r=C3=B4me Glisse <jglisse\@redhat.com>\n",
+  ">=20\n",
   "> +\t\t/*\n",
   "> +\t\t * No need to call mmu_notifier_invalidate_range() as we are\n",
   "> +\t\t * downgrading page table protection not changing it to point\n",
@@ -57,20 +57,23 @@
   ">  \t\tif (pmdp) {\n",
   ">  #ifdef CONFIG_FS_DAX_PMD\n",
   ">  \t\t\tpmd_t pmd;\n",
-  "> \@\@ -628,7 +635,6 \@\@ static void dax_mapping_entry_mkclean(struct address_space *mapping,\n",
-  ">  \t\t\tpmd = pmd_wrprotect(pmd);\n",
-  ">  \t\t\tpmd = pmd_mkclean(pmd);\n",
+  "> \@\@ -628,7 +635,6 \@\@ static void dax_mapping_entry_mkclean(struct address_=\n",
+  "space *mapping,\n",
+  ">  \t\t\tpmd =3D pmd_wrprotect(pmd);\n",
+  ">  \t\t\tpmd =3D pmd_mkclean(pmd);\n",
   ">  \t\t\tset_pmd_at(vma->vm_mm, address, pmdp, pmd);\n",
   "> -\t\t\tmmu_notifier_invalidate_range(vma->vm_mm, start, end);\n",
   "\n",
-  "Could the secondary TLB still see the mapping as dirty and propagate the dirty bit back?\n",
+  "Could the secondary TLB still see the mapping as dirty and propagate the di=\n",
+  "rty bit back?\n",
   "\n",
   ">  unlock_pmd:\n",
   ">  \t\t\tspin_unlock(ptl);\n",
   ">  #endif\n",
-  "> \@\@ -643,7 +649,6 \@\@ static void dax_mapping_entry_mkclean(struct address_space *mapping,\n",
-  ">  \t\t\tpte = pte_wrprotect(pte);\n",
-  ">  \t\t\tpte = pte_mkclean(pte);\n",
+  "> \@\@ -643,7 +649,6 \@\@ static void dax_mapping_entry_mkclean(struct address_=\n",
+  "space *mapping,\n",
+  ">  \t\t\tpte =3D pte_wrprotect(pte);\n",
+  ">  \t\t\tpte =3D pte_mkclean(pte);\n",
   ">  \t\t\tset_pte_at(vma->vm_mm, address, ptep, pte);\n",
   "> -\t\t\tmmu_notifier_invalidate_range(vma->vm_mm, start, end);\n",
   "\n",
@@ -97,10 +100,11 @@
   "> index c037d3d34950..ff5bc647b51d 100644\n",
   "> --- a/mm/huge_memory.c\n",
   "> +++ b/mm/huge_memory.c\n",
-  "> \@\@ -1186,8 +1186,15 \@\@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,\n",
+  "> \@\@ -1186,8 +1186,15 \@\@ static int do_huge_pmd_wp_page_fallback(struct vm_=\n",
+  "fault *vmf, pmd_t orig_pmd,\n",
   ">  \t\tgoto out_free_pages;\n",
   ">  \tVM_BUG_ON_PAGE(!PageHead(page), page);\n",
-  ">  \n",
+  "> =20\n",
   "> +\t/*\n",
   "> +\t * Leave pmd empty until pte is filled note we must notify here as\n",
   "> +\t * concurrent CPU thread might write to new page before the call to\n",
@@ -111,13 +115,14 @@
   "> +\t */\n",
   ">  \tpmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);\n",
   "> -\t/* leave pmd empty until pte is filled */\n",
-  ">  \n",
-  ">  \tpgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);\n",
+  "> =20\n",
+  ">  \tpgtable =3D pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);\n",
   ">  \tpmd_populate(vma->vm_mm, &_pmd, pgtable);\n",
-  "> \@\@ -2026,8 +2033,15 \@\@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,\n",
+  "> \@\@ -2026,8 +2033,15 \@\@ static void __split_huge_zero_page_pmd(struct vm_a=\n",
+  "rea_struct *vma,\n",
   ">  \tpmd_t _pmd;\n",
   ">  \tint i;\n",
-  ">  \n",
+  "> =20\n",
   "> -\t/* leave pmd empty until pte is filled */\n",
   "> -\tpmdp_huge_clear_flush_notify(vma, haddr, pmd);\n",
   "> +\t/*\n",
@@ -132,14 +137,15 @@
   "\n",
   "Shouldn't the secondary TLB know if the page size changed?\n",
   "\n",
-  ">  \n",
-  ">  \tpgtable = pgtable_trans_huge_withdraw(mm, pmd);\n",
+  "> =20\n",
+  ">  \tpgtable =3D pgtable_trans_huge_withdraw(mm, pmd);\n",
   ">  \tpmd_populate(mm, &_pmd, pgtable);\n",
   "> diff --git a/mm/hugetlb.c b/mm/hugetlb.c\n",
   "> index 1768efa4c501..63a63f1b536c 100644\n",
   "> --- a/mm/hugetlb.c\n",
   "> +++ b/mm/hugetlb.c\n",
-  "> \@\@ -3254,9 +3254,14 \@\@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,\n",
+  "> \@\@ -3254,9 +3254,14 \@\@ int copy_hugetlb_page_range(struct mm_struct *dst,=\n",
+  " struct mm_struct *src,\n",
   ">  \t\t\tset_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);\n",
   ">  \t\t} else {\n",
   ">  \t\t\tif (cow) {\n",
@@ -157,9 +163,10 @@
   "> -\t\t\t\tmmu_notifier_invalidate_range(src, mmun_start,\n",
   "> -\t\t\t\t\t\t\t\t   mmun_end);\n",
   ">  \t\t\t}\n",
-  ">  \t\t\tentry = huge_ptep_get(src_pte);\n",
-  ">  \t\t\tptepage = pte_page(entry);\n",
-  "> \@\@ -4288,7 +4293,12 \@\@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,\n",
+  ">  \t\t\tentry =3D huge_ptep_get(src_pte);\n",
+  ">  \t\t\tptepage =3D pte_page(entry);\n",
+  "> \@\@ -4288,7 +4293,12 \@\@ unsigned long hugetlb_change_protection(struct vm_=\n",
+  "area_struct *vma,\n",
   ">  \t * and that page table be reused and filled with junk.\n",
   ">  \t */\n",
   ">  \tflush_hugetlb_tlb_range(vma, start, end);\n",
@@ -172,12 +179,13 @@
   "> +\t */\n",
   ">  \ti_mmap_unlock_write(vma->vm_file->f_mapping);\n",
   ">  \tmmu_notifier_invalidate_range_end(mm, start, end);\n",
-  ">  \n",
+  "> =20\n",
   "> diff --git a/mm/ksm.c b/mm/ksm.c\n",
   "> index 6cb60f46cce5..be8f4576f842 100644\n",
   "> --- a/mm/ksm.c\n",
   "> +++ b/mm/ksm.c\n",
-  "> \@\@ -1052,8 +1052,13 \@\@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,\n",
+  "> \@\@ -1052,8 +1052,13 \@\@ static int write_protect_page(struct vm_area_struc=\n",
+  "t *vma, struct page *page,\n",
   ">  \t\t * So we clear the pte and flush the tlb before the check\n",
   ">  \t\t * this assure us that no O_DIRECT can happen after the check\n",
   ">  \t\t * or in the middle of the check.\n",
@@ -187,14 +195,15 @@
   "> +\t\t *\n",
   "> +\t\t * See Documentation/vm/mmu_notifier.txt\n",
   ">  \t\t */\n",
-  "> -\t\tentry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);\n",
-  "> +\t\tentry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);\n",
+  "> -\t\tentry =3D ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);\n",
+  "> +\t\tentry =3D ptep_clear_flush(vma, pvmw.address, pvmw.pte);\n",
   ">  \t\t/*\n",
   ">  \t\t * Check that no O_DIRECT or similar I/O is in progress on the\n",
   ">  \t\t * page\n",
-  "> \@\@ -1136,7 +1141,13 \@\@ static int replace_page(struct vm_area_struct *vma, struct page *page,\n",
+  "> \@\@ -1136,7 +1141,13 \@\@ static int replace_page(struct vm_area_struct *vma=\n",
+  ", struct page *page,\n",
   ">  \t}\n",
-  ">  \n",
+  "> =20\n",
   ">  \tflush_cache_page(vma, addr, pte_pfn(*ptep));\n",
   "> -\tptep_clear_flush_notify(vma, addr, ptep);\n",
   "> +\t/*\n",
@@ -205,16 +214,17 @@
   "> +\t */\n",
   "> +\tptep_clear_flush(vma, addr, ptep);\n",
   ">  \tset_pte_at_notify(mm, addr, ptep, newpte);\n",
-  ">  \n",
+  "> =20\n",
   ">  \tpage_remove_rmap(page, false);\n",
   "> diff --git a/mm/rmap.c b/mm/rmap.c\n",
   "> index 061826278520..6b5a0f219ac0 100644\n",
   "> --- a/mm/rmap.c\n",
   "> +++ b/mm/rmap.c\n",
-  "> \@\@ -937,10 +937,15 \@\@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,\n",
+  "> \@\@ -937,10 +937,15 \@\@ static bool page_mkclean_one(struct page *page, str=\n",
+  "uct vm_area_struct *vma,\n",
   ">  #endif\n",
   ">  \t\t}\n",
-  ">  \n",
+  "> =20\n",
   "> -\t\tif (ret) {\n",
   "> -\t\t\tmmu_notifier_invalidate_range(vma->vm_mm, cstart, cend);\n",
   "> +\t\t/*\n",
@@ -228,11 +238,12 @@
   ">  \t\t\t(*cleaned)++;\n",
   "> -\t\t}\n",
   ">  \t}\n",
-  ">  \n",
+  "> =20\n",
   ">  \tmmu_notifier_invalidate_range_end(vma->vm_mm, start, end);\n",
-  "> \@\@ -1424,6 +1429,10 \@\@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,\n",
+  "> \@\@ -1424,6 +1429,10 \@\@ static bool try_to_unmap_one(struct page *page, st=\n",
+  "ruct vm_area_struct *vma,\n",
   ">  \t\t\tif (pte_soft_dirty(pteval))\n",
-  ">  \t\t\t\tswp_pte = pte_swp_mksoft_dirty(swp_pte);\n",
+  ">  \t\t\t\tswp_pte =3D pte_swp_mksoft_dirty(swp_pte);\n",
   ">  \t\t\tset_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);\n",
   "> +\t\t\t/*\n",
   "> +\t\t\t * No need to invalidate here it will synchronize on\n",
@@ -240,8 +251,9 @@
   "> +\t\t\t */\n",
   ">  \t\t\tgoto discard;\n",
   ">  \t\t}\n",
-  ">  \n",
-  "> \@\@ -1481,6 +1490,9 \@\@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,\n",
+  "> =20\n",
+  "> \@\@ -1481,6 +1490,9 \@\@ static bool try_to_unmap_one(struct page *page, str=\n",
+  "uct vm_area_struct *vma,\n",
   ">  \t\t\t * will take care of the rest.\n",
   ">  \t\t\t */\n",
   ">  \t\t\tdec_mm_counter(mm, mm_counter(page));\n",
@@ -251,27 +263,30 @@
   ">  \t\t} else if (IS_ENABLED(CONFIG_MIGRATION) &&\n",
   ">  \t\t\t\t(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {\n",
   ">  \t\t\tswp_entry_t entry;\n",
-  "> \@\@ -1496,6 +1508,10 \@\@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,\n",
+  "> \@\@ -1496,6 +1508,10 \@\@ static bool try_to_unmap_one(struct page *page, st=\n",
+  "ruct vm_area_struct *vma,\n",
   ">  \t\t\tif (pte_soft_dirty(pteval))\n",
-  ">  \t\t\t\tswp_pte = pte_swp_mksoft_dirty(swp_pte);\n",
+  ">  \t\t\t\tswp_pte =3D pte_swp_mksoft_dirty(swp_pte);\n",
   ">  \t\t\tset_pte_at(mm, address, pvmw.pte, swp_pte);\n",
   "> +\t\t\t/*\n",
   "> +\t\t\t * No need to invalidate here it will synchronize on\n",
   "> +\t\t\t * against the special swap migration pte.\n",
   "> +\t\t\t */\n",
   ">  \t\t} else if (PageAnon(page)) {\n",
-  ">  \t\t\tswp_entry_t entry = { .val = page_private(subpage) };\n",
+  ">  \t\t\tswp_entry_t entry =3D { .val =3D page_private(subpage) };\n",
   ">  \t\t\tpte_t swp_pte;\n",
-  "> \@\@ -1507,6 +1523,8 \@\@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,\n",
+  "> \@\@ -1507,6 +1523,8 \@\@ static bool try_to_unmap_one(struct page *page, str=\n",
+  "uct vm_area_struct *vma,\n",
   ">  \t\t\t\tWARN_ON_ONCE(1);\n",
-  ">  \t\t\t\tret = false;\n",
+  ">  \t\t\t\tret =3D false;\n",
   ">  \t\t\t\t/* We have to invalidate as we cleared the pte */\n",
   "> +\t\t\t\tmmu_notifier_invalidate_range(mm, address,\n",
   "> +\t\t\t\t\t\t\taddress + PAGE_SIZE);\n",
   ">  \t\t\t\tpage_vma_mapped_walk_done(&pvmw);\n",
   ">  \t\t\t\tbreak;\n",
   ">  \t\t\t}\n",
-  "> \@\@ -1514,6 +1532,9 \@\@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,\n",
+  "> \@\@ -1514,6 +1532,9 \@\@ static bool try_to_unmap_one(struct page *page, str=\n",
+  "uct vm_area_struct *vma,\n",
   ">  \t\t\t/* MADV_FREE page check */\n",
   ">  \t\t\tif (!PageSwapBacked(page)) {\n",
   ">  \t\t\t\tif (!PageDirty(page)) {\n",
@@ -281,9 +296,10 @@
   ">  \t\t\t\t\tdec_mm_counter(mm, MM_ANONPAGES);\n",
   ">  \t\t\t\t\tgoto discard;\n",
   ">  \t\t\t\t}\n",
-  "> \@\@ -1547,13 +1568,39 \@\@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,\n",
+  "> \@\@ -1547,13 +1568,39 \@\@ static bool try_to_unmap_one(struct page *page, s=\n",
+  "truct vm_area_struct *vma,\n",
   ">  \t\t\tif (pte_soft_dirty(pteval))\n",
-  ">  \t\t\t\tswp_pte = pte_swp_mksoft_dirty(swp_pte);\n",
+  ">  \t\t\t\tswp_pte =3D pte_swp_mksoft_dirty(swp_pte);\n",
   ">  \t\t\tset_pte_at(mm, address, pvmw.pte, swp_pte);\n",
   "> -\t\t} else\n",
   "> +\t\t\t/* Invalidate as we cleared the pte */\n",
@@ -322,7 +338,7 @@
   "> -\t\tmmu_notifier_invalidate_range(mm, address,\n",
   "> -\t\t\t\t\t      address + PAGE_SIZE);\n",
   ">  \t}\n",
-  ">  \n",
+  "> =20\n",
   ">  \tmmu_notifier_invalidate_range_end(vma->vm_mm, start, end);\n",
   "\n",
   "Looking at the patchset, I understand the efficiency, but I am concerned\n",
@@ -331,4 +347,4 @@
   "Balbir Singh."
 ]
 
-d22594b2b0752c604dbe0a0dbe19d9fd06d5c5dcbd7429d2f372da6062605366
+aaeb14709d4c8044f174753cf1c5c112a1c63160d7222d81b2a044ac261eebf0
