* [PATCH v2] mm/smaps: Don't access young/dirty bit if pte unpresent
@ 2022-08-05 16:00 Peter Xu
  2022-08-08  5:23 ` Huang, Ying
From: Peter Xu @ 2022-08-05 16:00 UTC
  To: linux-kernel, linux-mm
  Cc: Yang Shi, David Hildenbrand, peterx, Nadav Amit, Andrew Morton,
	Konstantin Khlebnikov, Huang Ying, Vlastimil Babka

These bits are only valid when the ptes are present.  Introduce two
booleans for them and leave them false when !pte_present(), for both the
pte and pmd accounting paths.

The bug was found during code reading; no real-world issue has been
reported, but logically such an error can cause incorrect readings for
quite a few fields in either the smaps or smaps_rollup output.

For example, it could cause an over-estimate of values like Shared_Dirty,
Private_Dirty and Referenced, or an under-estimate of values like LazyFree,
Shared_Clean and Private_Clean.
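
As a side note (not part of the fix itself), the affected counters are
visible from user space in /proc/PID/smaps_rollup.  Below is a minimal
sketch, assuming only the standard smaps_rollup format and the field names
listed above, that dumps those counters for the current process:

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
  	static const char *fields[] = {
  		"Shared_Dirty", "Private_Dirty", "Referenced",
  		"LazyFree", "Shared_Clean", "Private_Clean",
  	};
  	char line[256];
  	FILE *f = fopen("/proc/self/smaps_rollup", "r");
  	size_t i, len;

  	if (!f) {
  		perror("fopen");
  		return 1;
  	}

  	while (fgets(line, sizeof(line), f)) {
  		for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
  			len = strlen(fields[i]);
  			/* Lines look like "Private_Dirty:      123 kB". */
  			if (!strncmp(line, fields[i], len) && line[len] == ':')
  				fputs(line, stdout);
  		}
  	}
  	fclose(f);
  	return 0;
  }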

Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Huang Ying <ying.huang@intel.com>
Fixes: b1d4d9e0cbd0 ("proc/smaps: carefully handle migration entries")
Fixes: c94b6923fa0a ("/proc/PID/smaps: Add PMD migration entry parsing")
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 fs/proc/task_mmu.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9913f3be9fd2..d56c65f98d00 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
-	bool migration = false;
+	bool migration = false, young = false, dirty = false;
 
 	if (pte_present(*pte)) {
 		page = vm_normal_page(vma, addr, *pte);
+		young = pte_young(*pte);
+		dirty = pte_dirty(*pte);
 	} else if (is_swap_pte(*pte)) {
 		swp_entry_t swpent = pte_to_swp_entry(*pte);
 
@@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
-		      locked, migration);
+	smaps_account(mss, page, false, young, dirty, locked, migration);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -572,11 +573,13 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
-	bool migration = false;
+	bool migration = false, young = false, dirty = false;
 
 	if (pmd_present(*pmd)) {
 		/* FOLL_DUMP will return -EFAULT on huge zero page */
 		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+		young = pmd_young(*pmd);
+		dirty = pmd_dirty(*pmd);
 	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
@@ -596,8 +599,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	else
 		mss->file_thp += HPAGE_PMD_SIZE;
 
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
-		      locked, migration);
+	smaps_account(mss, page, true, young, dirty, locked, migration);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
-- 
2.32.0



* Re: [PATCH v2] mm/smaps: Don't access young/dirty bit if pte unpresent
  2022-08-05 16:00 [PATCH v2] mm/smaps: Don't access young/dirty bit if pte unpresent Peter Xu
@ 2022-08-08  5:23 ` Huang, Ying
From: Huang, Ying @ 2022-08-08  5:23 UTC
  To: Peter Xu
  Cc: linux-kernel, linux-mm, Yang Shi, David Hildenbrand, Nadav Amit,
	Andrew Morton, Konstantin Khlebnikov, Vlastimil Babka

Peter Xu <peterx@redhat.com> writes:

> These bits are only valid when the ptes are present.  Introduce two
> booleans for them and leave them false when !pte_present(), for both the
> pte and pmd accounting paths.
>
> The bug was found during code reading; no real-world issue has been
> reported, but logically such an error can cause incorrect readings for
> quite a few fields in either the smaps or smaps_rollup output.
>
> For example, it could cause an over-estimate of values like Shared_Dirty,
> Private_Dirty and Referenced, or an under-estimate of values like LazyFree,
> Shared_Clean and Private_Clean.
>
> Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
> Cc: Huang Ying <ying.huang@intel.com>
> Fixes: b1d4d9e0cbd0 ("proc/smaps: carefully handle migration entries")
> Fixes: c94b6923fa0a ("/proc/PID/smaps: Add PMD migration entry parsing")
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
> Reviewed-by: David Hildenbrand <david@redhat.com>
> Reviewed-by: Yang Shi <shy828301@gmail.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>

LGTM, Thanks!

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

> ---
>  fs/proc/task_mmu.c | 14 ++++++++------
>  1 file changed, 8 insertions(+), 6 deletions(-)
>
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 9913f3be9fd2..d56c65f98d00 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -527,10 +527,12 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>  	struct vm_area_struct *vma = walk->vma;
>  	bool locked = !!(vma->vm_flags & VM_LOCKED);
>  	struct page *page = NULL;
> -	bool migration = false;
> +	bool migration = false, young = false, dirty = false;
>  
>  	if (pte_present(*pte)) {
>  		page = vm_normal_page(vma, addr, *pte);
> +		young = pte_young(*pte);
> +		dirty = pte_dirty(*pte);
>  	} else if (is_swap_pte(*pte)) {
>  		swp_entry_t swpent = pte_to_swp_entry(*pte);
>  
> @@ -560,8 +562,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
>  	if (!page)
>  		return;
>  
> -	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
> -		      locked, migration);
> +	smaps_account(mss, page, false, young, dirty, locked, migration);
>  }
>  
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> @@ -572,11 +573,13 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
>  	struct vm_area_struct *vma = walk->vma;
>  	bool locked = !!(vma->vm_flags & VM_LOCKED);
>  	struct page *page = NULL;
> -	bool migration = false;
> +	bool migration = false, young = false, dirty = false;
>  
>  	if (pmd_present(*pmd)) {
>  		/* FOLL_DUMP will return -EFAULT on huge zero page */
>  		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
> +		young = pmd_young(*pmd);
> +		dirty = pmd_dirty(*pmd);
>  	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
>  		swp_entry_t entry = pmd_to_swp_entry(*pmd);
>  
> @@ -596,8 +599,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
>  	else
>  		mss->file_thp += HPAGE_PMD_SIZE;
>  
> -	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
> -		      locked, migration);
> +	smaps_account(mss, page, true, young, dirty, locked, migration);
>  }
>  #else
>  static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,

