From: Zhongkun He <hezhongkun.hzk@bytedance.com>
To: minchan@kernel.org, senozhatsky@chromium.org, mhocko@suse.com
Cc: david@redhat.com, yosryahmed@google.com, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	Zhongkun He <hezhongkun.hzk@bytedance.com>
Subject: [RFC PATCH v2 2/2] zram: charge the compressed RAM to the page's memcgroup
Date: Mon, 24 Jul 2023 14:22:19 +0800
Message-ID: <20230724062219.2244240-1-hezhongkun.hzk@bytedance.com>

The compressed RAM used by zram is currently charged to the kernel
rather than to any memory cgroup. This patch charges the compressed
pages to the memory cgroup of the page's owner, for both direct and
indirect zram usage.

Direct zram usage by a process within a cgroup will fail to charge if
the cgroup is out of memory. Indirect zram usage by a process within a
cgroup, via swap in PF_MEMALLOC context, will charge successfully.

This allows some limit overrun, but not enough to matter in practice.
A compressed page is charged exactly once, and each charge means an
uncompressed page is about to be freed; the size of the compressed page
is less than or equal to that of the page being freed. The amount of
excess therefore depends only on the compression ratio, will not exceed
400KB, and the usage will eventually fall back below the hard limit,
so the overrun is not unbounded.

Signed-off-by: Zhongkun He <hezhongkun.hzk@bytedance.com>
---
 drivers/block/zram/zram_drv.c | 46 +++++++++++++++++++++++++++++++++++
 drivers/block/zram/zram_drv.h |  1 +
 2 files changed, 47 insertions(+)
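
For reference: the obj_cgroup_charge_zram()/obj_cgroup_uncharge_zram()
helpers used below are introduced by patch 1/2 of this series and are
not shown here. As a rough sketch only, assuming they are thin wrappers
around the existing obj_cgroup_charge() and obj_cgroup_uncharge() API
(and assuming the three-argument charge form used in zram_recompress()),
they could look something like this:

	/* Hypothetical sketch; the real helpers are defined in patch 1/2. */
	static inline int obj_cgroup_charge_zram(struct obj_cgroup *objcg,
						 gfp_t gfp, size_t size)
	{
		/* Charge the compressed object size to the owning memcg. */
		return obj_cgroup_charge(objcg, gfp, size);
	}

	static inline void obj_cgroup_uncharge_zram(struct obj_cgroup *objcg,
						    size_t size)
	{
		/* Return the charge when the compressed object is freed. */
		obj_cgroup_uncharge(objcg, size);
	}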

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 5676e6dd5b16..1e685a430c95 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -33,6 +33,7 @@
 #include <linux/debugfs.h>
 #include <linux/cpuhotplug.h>
 #include <linux/part_stat.h>
+#include <linux/memcontrol.h>
 
 #include "zram_drv.h"
 
@@ -135,6 +136,18 @@ static void zram_set_obj_size(struct zram *zram,
 	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
+static inline void zram_set_obj_cgroup(struct zram *zram, u32 index,
+					struct obj_cgroup *objcg)
+{
+	zram->table[index].objcg = objcg;
+}
+
+static inline struct obj_cgroup *zram_get_obj_cgroup(struct zram *zram,
+					u32 index)
+{
+	return zram->table[index].objcg;
+}
+
 static inline bool zram_allocated(struct zram *zram, u32 index)
 {
 	return zram_get_obj_size(zram, index) ||
@@ -1256,6 +1269,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
 static void zram_free_page(struct zram *zram, size_t index)
 {
 	unsigned long handle;
+	struct obj_cgroup *objcg;
 
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
 	zram->table[index].ac_time = 0;
@@ -1289,6 +1303,13 @@ static void zram_free_page(struct zram *zram, size_t index)
 		goto out;
 	}
 
+	objcg = zram_get_obj_cgroup(zram, index);
+	if (objcg) {
+		obj_cgroup_uncharge_zram(objcg, zram_get_obj_size(zram, index));
+		obj_cgroup_put(objcg);
+		zram_set_obj_cgroup(zram, index, NULL);
+	}
+
 	handle = zram_get_handle(zram, index);
 	if (!handle)
 		return;
@@ -1419,6 +1440,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	struct zcomp_strm *zstrm;
 	unsigned long element = 0;
 	enum zram_pageflags flags = 0;
+	struct obj_cgroup *objcg;
 
 	mem = kmap_atomic(page);
 	if (page_same_filled(mem, &element)) {
@@ -1494,6 +1516,14 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 		return -ENOMEM;
 	}
 
+	objcg = get_obj_cgroup_from_page(page);
+	if (objcg && obj_cgroup_charge_zram(objcg, comp_len)) {
+		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+		zs_free(zram->mem_pool, handle);
+		obj_cgroup_put(objcg);
+		return -ENOMEM;
+	}
+
 	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
 	src = zstrm->buffer;
@@ -1526,6 +1556,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	}  else {
 		zram_set_handle(zram, index, handle);
 		zram_set_obj_size(zram, index, comp_len);
+		zram_set_obj_cgroup(zram, index, objcg);
 	}
 	zram_slot_unlock(zram, index);
 
@@ -1575,6 +1606,7 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 			   u32 threshold, u32 prio, u32 prio_max)
 {
 	struct zcomp_strm *zstrm = NULL;
+	struct obj_cgroup *objcg;
 	unsigned long handle_old;
 	unsigned long handle_new;
 	unsigned int comp_len_old;
@@ -1669,6 +1701,17 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 	if (threshold && comp_len_new >= threshold)
 		return 0;
 
+	objcg = zram_get_obj_cgroup(zram, index);
+	if (objcg) {
+		obj_cgroup_get(objcg);
+		if (obj_cgroup_charge_zram(objcg, GFP_KERNEL, comp_len_new)) {
+			zcomp_stream_put(zram->comps[prio]);
+			obj_cgroup_put(objcg);
+			return -ENOMEM;
+		}
+	}
+
 	/*
 	 * No direct reclaim (slow path) for handle allocation and no
 	 * re-compression attempt (unlike in zram_write_bvec()) since
@@ -1683,6 +1726,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 			       __GFP_MOVABLE);
 	if (IS_ERR_VALUE(handle_new)) {
 		zcomp_stream_put(zram->comps[prio]);
+		obj_cgroup_uncharge_zram(objcg, comp_len_new);
+		obj_cgroup_put(objcg);
 		return PTR_ERR((void *)handle_new);
 	}
 
@@ -1696,6 +1741,7 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page,
 	zram_set_handle(zram, index, handle_new);
 	zram_set_obj_size(zram, index, comp_len_new);
 	zram_set_priority(zram, index, prio);
+	zram_set_obj_cgroup(zram, index, objcg);
 
 	atomic64_add(comp_len_new, &zram->stats.compr_data_size);
 	atomic64_inc(&zram->stats.pages_stored);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index ca7a15bd4845..959d721d5474 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -72,6 +72,7 @@ struct zram_table_entry {
 #ifdef CONFIG_ZRAM_MEMORY_TRACKING
 	ktime_t ac_time;
 #endif
+	struct obj_cgroup *objcg;
 };
 
 struct zram_stats {
-- 
2.25.1

