From: Sahitya Tummala <stummala@codeaurora.org>
To: Alexander Polakov <apolyakov@beget.ru>,
	Andrew Morton <akpm@linux-foundation.org>,
	Vladimir Davydov <vdavydov.dev@gmail.com>,
	Jan Kara <jack@suse.cz>,
	viro@zeniv.linux.org.uk, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org
Cc: Sahitya Tummala <stummala@codeaurora.org>
Subject: [PATCH v4 1/2] mm/list_lru.c: fix list_lru_count_node() to be race free
Date: Thu, 29 Jun 2017 09:09:15 +0530	[thread overview]
Message-ID: <1498707555-30525-1-git-send-email-stummala@codeaurora.org> (raw)
In-Reply-To: <20170628171854.t4sjyjv55j673qzv@esperanza>

list_lru_count_node() iterates over all memcgs to get the total
number of entries on the node, but it can race with
memcg_drain_all_list_lrus(), which migrates the entries from a dead
cgroup to another. As a result, list_lru_count_node() can return an
incorrect number of entries.
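
To make the race concrete, here is a toy user-space model (not kernel
code; the list sizes and helper names are made up for illustration) of
a per-memcg walk overlapping with a drain that migrates entries
between cgroup lists:

#include <stdio.h>

/*
 * Toy user-space model, not kernel code: two per-memcg lists on one
 * node, holding 4 and 6 entries respectively (10 in total).
 */
static long memcg_list[2] = { 4, 6 };

/* Mimics the old list_lru_count_node(): sum the lists one at a time. */
static long count_by_iteration(int drain_in_between)
{
        long count = 0;

        count += memcg_list[0];
        if (drain_in_between) {
                /*
                 * A concurrent memcg_drain_all_list_lrus() migrates the
                 * entries of the dying cgroup (list 1) into list 0, which
                 * has already been counted, so they are missed below.
                 */
                memcg_list[0] += memcg_list[1];
                memcg_list[1] = 0;
        }
        count += memcg_list[1];
        return count;
}

int main(void)
{
        /* Prints 4 instead of the expected 10. */
        printf("count with concurrent drain: %ld\n", count_by_iteration(1));
        return 0;
}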

Fix this by keeping track of the total number of entries per node and
simply returning it from list_lru_count_node().
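
For contrast, a sketch of the fixed scheme under the same toy
assumptions (lru_add/lru_del/drain are illustrative helpers, not the
kernel API): the per-node total only changes when an entry is added,
deleted or isolated, which in the kernel happens under the node's
lock, while a drain merely moves entries between per-memcg lists and
therefore leaves the total alone:

#include <stdio.h>

/* Same toy model, now with a per-node total as added by this patch. */
static long memcg_list[2] = { 4, 6 };
static long nr_items = 10;      /* stands in for nlru->nr_items */

/*
 * Add/delete adjust both the per-memcg list and the node total
 * (in the kernel both updates happen under nlru->lock).
 */
static void lru_add(int idx) { memcg_list[idx]++; nr_items++; }
static void lru_del(int idx) { memcg_list[idx]--; nr_items--; }

/* A drain only moves entries between lists; the node total is untouched. */
static void drain(int src, int dst)
{
        memcg_list[dst] += memcg_list[src];
        memcg_list[src] = 0;
}

int main(void)
{
        drain(1, 0);                                    /* concurrent memcg drain */
        printf("per-node count: %ld\n", nr_items);      /* still 10 */

        lru_add(0);
        lru_del(0);
        printf("after add + delete: %ld\n", nr_items);  /* back to 10 */
        return 0;
}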

Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
---
 include/linux/list_lru.h |  1 +
 mm/list_lru.c            | 14 ++++++--------
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index cb0ba9f..fa7fd03 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@ struct list_lru_node {
 	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
 	struct list_lru_memcg	*memcg_lrus;
 #endif
+	long nr_items;
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 234676e..7a40fa2 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 		l = list_lru_from_kmem(nlru, item);
 		list_add_tail(item, &l->list);
 		l->nr_items++;
+		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
 		l = list_lru_from_kmem(nlru, item);
 		list_del_init(item);
 		l->nr_items--;
+		nlru->nr_items--;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -183,15 +185,10 @@ unsigned long list_lru_count_one(struct list_lru *lru,
 
 unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 {
-	long count = 0;
-	int memcg_idx;
+	struct list_lru_node *nlru;
 
-	count += __list_lru_count_one(lru, nid, -1);
-	if (list_lru_memcg_aware(lru)) {
-		for_each_memcg_cache_index(memcg_idx)
-			count += __list_lru_count_one(lru, nid, memcg_idx);
-	}
-	return count;
+	nlru = &lru->node[nid];
+	return nlru->nr_items;
 }
 EXPORT_SYMBOL_GPL(list_lru_count_node);
 
@@ -226,6 +223,7 @@ unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 			assert_spin_locked(&nlru->lock);
 		case LRU_REMOVED:
 			isolated++;
+			nlru->nr_items--;
 			/*
 			 * If the lru lock has been dropped, our list
 			 * traversal is now invalid and so we have to
-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.


Thread overview: 38+ messages
2017-06-12  0:47 [PATCH] mm/list_lru.c: use cond_resched_lock() for nlru->lock Sahitya Tummala
2017-06-12 13:11 ` Jan Kara
2017-06-15 21:05 ` Andrew Morton
2017-06-16 14:44   ` Sahitya Tummala
2017-06-17 11:14   ` Vladimir Davydov
2017-06-20  2:52     ` Sahitya Tummala
2017-06-21  6:39       ` [PATCH v2] fs/dcache.c: fix spin lockup issue on nlru->lock Sahitya Tummala
2017-06-21 16:31         ` Vladimir Davydov
2017-06-22 16:31           ` Sahitya Tummala
2017-06-22 17:49             ` Vladimir Davydov
2017-06-28  6:07               ` [PATCH v3 1/2] mm/list_lru.c: fix list_lru_count_node() to be race free Sahitya Tummala
2017-06-28  6:07                 ` [PATCH v3 2/2] fs/dcache.c: fix spin lockup issue on nlru->lock Sahitya Tummala
2017-06-28 17:18                 ` [PATCH v3 1/2] mm/list_lru.c: fix list_lru_count_node() to be race free Vladimir Davydov
2017-06-29  3:39                   ` [PATCH v4 1/2] mm/list_lru.c: fix list_lru_count_node() to be race free Sahitya Tummala [this message]
2017-07-01 16:28                     ` Vladimir Davydov
2017-06-29  3:39                   ` [PATCH v4 2/2] fs/dcache.c: fix spin lockup issue on nlru->lock Sahitya Tummala
2017-06-29 22:48                     ` Andrew Morton
2017-06-30  3:16                       ` Sahitya Tummala
2017-07-01 16:28                     ` Vladimir Davydov