linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Yuanchu Xie <yuanchu@google.com>
To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	"Rafael J . Wysocki" <rafael@kernel.org>,
	 "Michael S . Tsirkin" <mst@redhat.com>,
	David Hildenbrand <david@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	 Andrew Morton <akpm@linux-foundation.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	 Michal Hocko <mhocko@kernel.org>,
	Roman Gushchin <roman.gushchin@linux.dev>,
	 Shakeel Butt <shakeelb@google.com>,
	Muchun Song <muchun.song@linux.dev>, Yu Zhao <yuzhao@google.com>,
	 Kefeng Wang <wangkefeng.wang@huawei.com>,
	Kairui Song <kasong@tencent.com>,
	 Yosry Ahmed <yosryahmed@google.com>,
	Yuanchu Xie <yuanchu@google.com>,
	 "T . J . Alumbaugh" <talumbau@google.com>
Cc: Wei Xu <weixugc@google.com>, SeongJae Park <sj@kernel.org>,
	 Sudarshan Rajagopalan <quic_sudaraja@quicinc.com>,
	kai.huang@intel.com, hch@lst.de,  jon@nutanix.com,
	Aneesh Kumar K V <aneesh.kumar@linux.ibm.com>,
	 Matthew Wilcox <willy@infradead.org>,
	Vasily Averin <vasily.averin@linux.dev>,
	 linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org,  linux-mm@kvack.org,
	cgroups@vger.kernel.org
Subject: [RFC PATCH v2 2/6] mm: add working set refresh threshold to rate-limit aggregation
Date: Wed, 21 Jun 2023 18:04:50 +0000	[thread overview]
Message-ID: <20230621180454.973862-3-yuanchu@google.com> (raw)
In-Reply-To: <20230621180454.973862-1-yuanchu@google.com>

Refresh threshold is a rate limiting factor to working set
histogram reads. When a working set report is generated, a timestamp
is noted, and the same report will be read until it expires beyond
the refresh threshold, at which point a new report is generated.

Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
Signed-off-by: Yuanchu Xie <yuanchu@google.com>
---
 include/linux/mmzone.h |  1 +
 include/linux/wsr.h    |  3 +++
 mm/internal.h          | 11 +++++++++
 mm/vmscan.c            | 39 +++++++++++++++++++++++++++++--
 mm/wsr.c               | 52 +++++++++++++++++++++++++++++++++++++++---
 5 files changed, 101 insertions(+), 5 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 96f0d8f3584e4..bca828a16a46b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -362,6 +362,7 @@ enum lruvec_flags {
 
 #ifndef __GENERATING_BOUNDS_H
 
+struct node;
 struct lruvec;
 struct page_vma_mapped_walk;
 
diff --git a/include/linux/wsr.h b/include/linux/wsr.h
index fa46b4d61177d..a86105468c710 100644
--- a/include/linux/wsr.h
+++ b/include/linux/wsr.h
@@ -26,6 +26,8 @@ struct ws_bin {
 struct wsr {
 	/* protects bins */
 	struct mutex bins_lock;
+	unsigned long timestamp;
+	unsigned long refresh_threshold;
 	struct ws_bin bins[MAX_NR_BINS];
 };
 
@@ -40,6 +42,7 @@ ssize_t wsr_intervals_ms_parse(char *src, struct ws_bin *bins);
 
 /*
  * wsr->bins needs to be locked
+ * refreshes wsr based on the refresh threshold
  */
 void wsr_refresh(struct wsr *wsr, struct mem_cgroup *root,
 		 struct pglist_data *pgdat);
diff --git a/mm/internal.h b/mm/internal.h
index 88dba0b11f663..ce4757e7f8277 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -186,6 +186,17 @@ int folio_isolate_lru(struct folio *folio);
 void putback_lru_page(struct page *page);
 void folio_putback_lru(struct folio *folio);
 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
+int get_swappiness(struct lruvec *lruvec, struct scan_control *sc);
+bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+			struct scan_control *sc, bool can_swap,
+			bool force_scan);
+
+/*
+ * in mm/wsr.c
+ */
+void refresh_wsr(struct wsr *wsr, struct mem_cgroup *root,
+		 struct pglist_data *pgdat, struct scan_control *sc,
+		 unsigned long refresh_threshold);
 
 /*
  * in mm/rmap.c:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 150e3cd70c65e..66c5df2a7f65b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3201,7 +3201,7 @@ static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
 	return &pgdat->__lruvec;
 }
 
-static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
+int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
 {
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@@ -4402,7 +4402,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
 	spin_unlock_irq(&lruvec->lru_lock);
 }
 
-static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 			       struct scan_control *sc, bool can_swap, bool force_scan)
 {
 	bool success;
@@ -5900,6 +5900,41 @@ static int __init init_lru_gen(void)
 };
 late_initcall(init_lru_gen);
 
+/******************************************************************************
+ *                          working set reporting
+ ******************************************************************************/
+
+#ifdef CONFIG_WSR
+void wsr_refresh(struct wsr *wsr, struct mem_cgroup *root,
+		 struct pglist_data *pgdat)
+{
+	unsigned int flags;
+	struct scan_control sc = {
+		.may_writepage = true,
+		.may_unmap = true,
+		.may_swap = true,
+		.reclaim_idx = MAX_NR_ZONES - 1,
+		.gfp_mask = GFP_KERNEL,
+	};
+
+	lockdep_assert_held(&wsr->bins_lock);
+
+	if (wsr->bins->idle_age != -1) {
+		unsigned long timestamp = READ_ONCE(wsr->timestamp);
+		unsigned long threshold = READ_ONCE(wsr->refresh_threshold);
+
+		if (time_is_before_jiffies(timestamp + threshold)) {
+			set_task_reclaim_state(current, &sc.reclaim_state);
+			flags = memalloc_noreclaim_save();
+			refresh_wsr(wsr, root, pgdat, &sc, threshold);
+			memalloc_noreclaim_restore(flags);
+			set_task_reclaim_state(current, NULL);
+		}
+	}
+}
+
+#endif /* CONFIG_WSR */
+
 #else /* !CONFIG_LRU_GEN */
 
 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
diff --git a/mm/wsr.c b/mm/wsr.c
index 1e4c0ce69caf7..ee295d164461e 100644
--- a/mm/wsr.c
+++ b/mm/wsr.c
@@ -125,8 +125,9 @@ static void collect_wsr(struct wsr *wsr, const struct lruvec *lruvec)
 	}
 }
 
-static void refresh_wsr(struct wsr *wsr, struct mem_cgroup *root,
-			struct pglist_data *pgdat)
+void refresh_wsr(struct wsr *wsr, struct mem_cgroup *root,
+		 struct pglist_data *pgdat, struct scan_control *sc,
+		 unsigned long refresh_threshold)
 {
 	struct ws_bin *bin;
 	struct mem_cgroup *memcg;
@@ -146,6 +147,24 @@ static void refresh_wsr(struct wsr *wsr, struct mem_cgroup *root,
 	do {
 		struct lruvec *lruvec =
 			mem_cgroup_lruvec(memcg, pgdat);
+		bool can_swap = get_swappiness(lruvec, sc);
+		unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq);
+		unsigned long min_seq[ANON_AND_FILE] = {
+			READ_ONCE(lruvec->lrugen.min_seq[LRU_GEN_ANON]),
+			READ_ONCE(lruvec->lrugen.min_seq[LRU_GEN_FILE]),
+		};
+
+		mem_cgroup_calculate_protection(root, memcg);
+		if (!mem_cgroup_below_min(root, memcg) && refresh_threshold &&
+		    min_seq[!can_swap] + MAX_NR_GENS - 1 > max_seq) {
+			int gen = lru_gen_from_seq(max_seq);
+			unsigned long birth =
+				READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+			if (time_is_before_jiffies(birth + refresh_threshold))
+				try_to_inc_max_seq(lruvec, max_seq, sc,
+						   can_swap, false);
+		}
 
 		collect_wsr(wsr, lruvec);
 
@@ -165,6 +184,32 @@ static struct wsr *kobj_to_wsr(struct kobject *kobj)
 	return lruvec_wsr(mem_cgroup_lruvec(NULL, kobj_to_pgdat(kobj)));
 }
 
+
+static ssize_t refresh_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
+			       char *buf)
+{
+	struct wsr *wsr = kobj_to_wsr(kobj);
+	unsigned long threshold = READ_ONCE(wsr->refresh_threshold);
+
+	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(threshold));
+}
+
+static ssize_t refresh_ms_store(struct kobject *kobj, struct kobj_attribute *attr,
+				const char *buf, size_t len)
+{
+	unsigned int msecs;
+	struct wsr *wsr = kobj_to_wsr(kobj);
+
+	if (kstrtouint(buf, 0, &msecs))
+		return -EINVAL;
+
+	WRITE_ONCE(wsr->refresh_threshold, msecs_to_jiffies(msecs));
+
+	return len;
+}
+
+static struct kobj_attribute refresh_ms_attr = __ATTR_RW(refresh_ms);
+
 static ssize_t intervals_ms_show(struct kobject *kobj, struct kobj_attribute *attr,
 				 char *buf)
 {
@@ -227,7 +272,7 @@ static ssize_t histogram_show(struct kobject *kobj, struct kobj_attribute *attr,
 
 	mutex_lock(&wsr->bins_lock);
 
-	refresh_wsr(wsr, NULL, kobj_to_pgdat(kobj));
+	wsr_refresh(wsr, NULL, kobj_to_pgdat(kobj));
 
 	for (bin = wsr->bins; bin->idle_age != -1; bin++)
 		len += sysfs_emit_at(buf, len, "%u anon=%lu file=%lu\n",
@@ -245,6 +290,7 @@ static ssize_t histogram_show(struct kobject *kobj, struct kobj_attribute *attr,
 static struct kobj_attribute histogram_attr = __ATTR_RO(histogram);
 
 static struct attribute *wsr_attrs[] = {
+	&refresh_ms_attr.attr,
 	&intervals_ms_attr.attr,
 	&histogram_attr.attr,
 	NULL
-- 
2.41.0.162.gfafddb0af9-goog



  parent reply	other threads:[~2023-06-21 18:17 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-06-21 18:04 [RFC PATCH v2 0/6] mm: working set reporting Yuanchu Xie
2023-06-21 18:04 ` [RFC PATCH v2 1/6] mm: aggregate working set information into histograms Yuanchu Xie
2023-06-21 18:04 ` Yuanchu Xie [this message]
2023-06-21 18:04 ` [RFC PATCH v2 3/6] mm: report working set when under memory pressure Yuanchu Xie
2023-06-21 18:04 ` [RFC PATCH v2 4/6] mm: extend working set reporting to memcgs Yuanchu Xie
2023-06-21 18:04 ` [RFC PATCH v2 5/6] mm: add per-memcg reaccess histogram Yuanchu Xie
2023-06-21 18:04 ` [RFC PATCH v2 6/6] virtio-balloon: Add Working Set reporting Yuanchu Xie
2023-06-21 18:48 ` [RFC PATCH v2 0/6] mm: working set reporting Yu Zhao

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230621180454.973862-3-yuanchu@google.com \
    --to=yuanchu@google.com \
    --cc=akpm@linux-foundation.org \
    --cc=aneesh.kumar@linux.ibm.com \
    --cc=cgroups@vger.kernel.org \
    --cc=david@redhat.com \
    --cc=gregkh@linuxfoundation.org \
    --cc=hannes@cmpxchg.org \
    --cc=hch@lst.de \
    --cc=jasowang@redhat.com \
    --cc=jon@nutanix.com \
    --cc=kai.huang@intel.com \
    --cc=kasong@tencent.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mhocko@kernel.org \
    --cc=mst@redhat.com \
    --cc=muchun.song@linux.dev \
    --cc=quic_sudaraja@quicinc.com \
    --cc=rafael@kernel.org \
    --cc=roman.gushchin@linux.dev \
    --cc=shakeelb@google.com \
    --cc=sj@kernel.org \
    --cc=talumbau@google.com \
    --cc=vasily.averin@linux.dev \
    --cc=virtualization@lists.linux-foundation.org \
    --cc=wangkefeng.wang@huawei.com \
    --cc=weixugc@google.com \
    --cc=willy@infradead.org \
    --cc=yosryahmed@google.com \
    --cc=yuzhao@google.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).