+ mm-proc-smaps_rollup-fix-pss_locked-calculation.patch added to -mm tree
From: akpm @ 2019-02-08 22:18 UTC
  To: mm-commits, vbabka, stable, dancol, adobriyan, sspatil


The patch titled
     Subject: mm: proc: smaps_rollup: fix pss_locked calculation
has been added to the -mm tree.  Its filename is
     mm-proc-smaps_rollup-fix-pss_locked-calculation.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-proc-smaps_rollup-fix-pss_locked-calculation.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-proc-smaps_rollup-fix-pss_locked-calculation.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Sandeep Patil <sspatil@android.com>
Subject: mm: proc: smaps_rollup: fix pss_locked calculation

The 'pss_locked' field of smaps_rollup was being calculated incorrectly:
it accumulated the running pss total every time a locked VMA was found.
Fix that by adding to 'pss_locked' at the same time as to 'pss', whenever
the VMA being walked is locked.
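
To see why the old accounting over-counts, here is a minimal sketch
(illustrative only; 'struct stats' and the toy walkers below are
hypothetical and are not the kernel code):

	/* Toy model of per-VMA accounting; each number stands in for one VMA's PSS. */
	struct stats { unsigned long pss, pss_locked; };

	static void walk_old(struct stats *s, const unsigned long *vma_pss,
			     const int *locked, int n)
	{
		for (int i = 0; i < n; i++) {
			s->pss += vma_pss[i];
			if (locked[i])
				s->pss_locked += s->pss;	/* bug: adds the running total */
		}
	}

	static void walk_new(struct stats *s, const unsigned long *vma_pss,
			     const int *locked, int n)
	{
		for (int i = 0; i < n; i++) {
			s->pss += vma_pss[i];
			if (locked[i])
				s->pss_locked += vma_pss[i];	/* only this VMA's share */
		}
	}

With vma_pss = {10, 20} and both VMAs locked, walk_old() reports
pss_locked = 10 + 30 = 40 while walk_new() reports the correct 30; the
patch below achieves the walk_new() behaviour by accumulating pss_locked
page by page inside smaps_account().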

Link: http://lkml.kernel.org/r/20190203065425.14650-1-sspatil@android.com
Fixes: 493b0e9d945f ("mm: add /proc/pid/smaps_rollup")
Signed-off-by: Sandeep Patil <sspatil@android.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Daniel Colascione <dancol@google.com>
Cc: <stable@vger.kernel.org>	[4.14.x, 4.19.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/proc/task_mmu.c |   22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

--- a/fs/proc/task_mmu.c~mm-proc-smaps_rollup-fix-pss_locked-calculation
+++ a/fs/proc/task_mmu.c
@@ -423,7 +423,7 @@ struct mem_size_stats {
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-		bool compound, bool young, bool dirty)
+		bool compound, bool young, bool dirty, bool locked)
 {
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_siz
 		else
 			mss->private_clean += size;
 		mss->pss += (u64)size << PSS_SHIFT;
+		if (locked)
+			mss->pss_locked += (u64)size << PSS_SHIFT;
 		return;
 	}
 
 	for (i = 0; i < nr; i++, page++) {
 		int mapcount = page_mapcount(page);
+		unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
 
 		if (mapcount >= 2) {
 			if (dirty || PageDirty(page))
 				mss->shared_dirty += PAGE_SIZE;
 			else
 				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+			mss->pss += pss / mapcount;
+			if (locked)
+				mss->pss_locked += pss / mapcount;
 		} else {
 			if (dirty || PageDirty(page))
 				mss->private_dirty += PAGE_SIZE;
 			else
 				mss->private_clean += PAGE_SIZE;
-			mss->pss += PAGE_SIZE << PSS_SHIFT;
+			mss->pss += pss;
+			if (locked)
+				mss->pss_locked += pss;
 		}
 	}
 }
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte,
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
 
 	if (pte_present(*pte)) {
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd,
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page;
 
 	/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd,
 		/* pass */;
 	else
 		VM_BUG_ON_PAGE(1, page);
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_
 		}
 	}
 #endif
-
 	/* mmap_sem is held in m_start */
 	walk_page_vma(vma, &smaps_walk);
-	if (vma->vm_flags & VM_LOCKED)
-		mss->pss_locked += mss->pss;
 }
 
 #define SEQ_PUT_DEC(str, val) \
_

Patches currently in -mm which might be from sspatil@android.com are

mm-proc-smaps_rollup-fix-pss_locked-calculation.patch



+ mm-proc-smaps_rollup-fix-pss_locked-calculation.patch added to -mm tree
From: akpm @ 2019-01-29  0:15 UTC
  To: mm-commits, vbabka, stable, dancol, avagin, adobriyan, sspatil


The patch titled
     Subject: fs/proc/task_mmu.c: fix smaps_rollup pss_locked calculation
has been added to the -mm tree.  Its filename is
     mm-proc-smaps_rollup-fix-pss_locked-calculation.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-proc-smaps_rollup-fix-pss_locked-calculation.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-proc-smaps_rollup-fix-pss_locked-calculation.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included in linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Sandeep Patil <sspatil@android.com>
Subject: fs/proc/task_mmu.c: fix smaps_rollup pss_locked calculation

The 'pss_locked' field of smaps_rollup was being calculated incorrectly,
as it accumulated the running pss total every time a locked VMA was found.

Fix that by recording the current pss value before each VMA is walked, so
that only the delta is added when the VMA turns out to be VM_LOCKED.
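
A minimal sketch of that snapshot-and-delta idea (illustrative only; the
toy gather_one_vma() below is hypothetical and merely stands in for
smap_gather_stats()):

	struct stats { unsigned long pss, pss_locked; };

	static void gather_one_vma(struct stats *mss, unsigned long vma_pss,
				   int vma_locked)
	{
		unsigned long pss = mss->pss;	/* record pss before the page walk */

		mss->pss += vma_pss;		/* stands in for walk_page_vma() */

		if (vma_locked)
			mss->pss_locked += mss->pss - pss;	/* delta == this VMA only */
	}

With three VMAs contributing 10, 20 and 30 pages of pss and only the
second one locked, pss_locked ends up as 20, whereas the old code would
have added the running total of 30 at that point.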

Link: http://lkml.kernel.org/r/20190121011049.160505-1-sspatil@android.com
Fixes: 493b0e9d945f ("mm: add /proc/pid/smaps_rollup")
Signed-off-by: Sandeep Patil <sspatil@android.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrey Vagin <avagin@openvz.org>
Cc: Daniel Colascione <dancol@google.com>
Cc: <stable@vger.kernel.org>	[4.14.x 4.19.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/proc/task_mmu.c |    6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

--- a/fs/proc/task_mmu.c~mm-proc-smaps_rollup-fix-pss_locked-calculation
+++ a/fs/proc/task_mmu.c
@@ -721,6 +721,7 @@ static void smap_gather_stats(struct vm_
 #endif
 		.mm = vma->vm_mm,
 	};
+	unsigned long pss;
 
 	smaps_walk.private = mss;
 
@@ -749,11 +750,12 @@ static void smap_gather_stats(struct vm_
 		}
 	}
 #endif
-
+	/* record current pss so we can calculate the delta after page walk */
+	pss = mss->pss;
 	/* mmap_sem is held in m_start */
 	walk_page_vma(vma, &smaps_walk);
 	if (vma->vm_flags & VM_LOCKED)
-		mss->pss_locked += mss->pss;
+		mss->pss_locked += mss->pss - pss;
 }
 
 #define SEQ_PUT_DEC(str, val) \
_

Patches currently in -mm which might be from sspatil@android.com are

mm-proc-smaps_rollup-fix-pss_locked-calculation.patch


