From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
To: mhocko@kernel.org, vdavydov@virtuozzo.com
Cc: akpm@linux-foundation.org, rientjes@google.com,
	hannes@cmpxchg.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH 2/2] mm: oom: deduplicate victim selection code for memcg and global oom
Date: Wed, 8 Jun 2016 20:18:24 +0900
Message-ID: <201606082018.EDC09327.HMQOFOVJFSOFtL@I-love.SAKURA.ne.jp>
In-Reply-To: <20160608083334.GF22570@dhcp22.suse.cz>

Michal Hocko wrote:
> The victim selection code can be reduced because it is basically
> shared between the two; only the iterator differs. But I guess that
> can be eliminated by a simple helper.

Thank you for CCing me. I like this cleanup.

> ---
>  include/linux/oom.h |  5 +++++
>  mm/memcontrol.c     | 47 ++++++-----------------------------------
>  mm/oom_kill.c       | 60 ++++++++++++++++++++++++++++-------------------------
>  3 files changed, 43 insertions(+), 69 deletions(-)

I think we can apply your version with the changes below folded in.
(I think the totalpages argument can be passed via oom_control as well. Also, according to
http://lkml.kernel.org/r/201602192336.EJF90671.HMFLFSVOFJOtOQ@I-love.SAKURA.ne.jp ,
we can safely pass NULL instead of oc->memcg to oom_badness() in oom_evaluate_task().)
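
For illustration only, a rough sketch of what carrying totalpages in
oom_control could look like; the new field name and the exact member layout
are my assumptions, not part of the posted patches:

/*
 * Sketch only: move totalpages into struct oom_control so that callers
 * no longer need to pass it around as a separate argument.
 */
struct oom_control {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct mem_cgroup *memcg;
	gfp_t gfp_mask;
	int order;
	unsigned long totalpages;	/* assumed new member */
	struct task_struct *chosen;
	unsigned long chosen_points;
};

static int oom_evaluate_task(struct oom_control *oc, struct task_struct *task)
{
	unsigned long points;

	/* OOM_SCAN_* handling as in the diff below ... */
	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (points > oc->chosen_points) {
		if (oc->chosen)
			put_task_struct(oc->chosen);
		get_task_struct(task);
		oc->chosen = task;
		oc->chosen_points = points;
	}
	return 1;
}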

 include/linux/oom.h |   10 ----------
 mm/memcontrol.c     |    7 +++++--
 mm/oom_kill.c       |   14 ++++++++++++--
 3 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7b3eb25..77e98a0 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -49,13 +49,6 @@ enum oom_constraint {
 	CONSTRAINT_MEMCG,
 };
 
-enum oom_scan_t {
-	OOM_SCAN_OK,		/* scan thread and find its badness */
-	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
-	OOM_SCAN_ABORT,		/* abort the iteration and return */
-	OOM_SCAN_SELECT,	/* always select this thread first */
-};
-
 extern struct mutex oom_lock;
 
 static inline void set_current_oom_origin(void)
@@ -96,9 +89,6 @@ extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
 extern void check_panic_on_oom(struct oom_control *oc,
 			       enum oom_constraint constraint);
 
-extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
-					       struct task_struct *task);
-
 extern bool out_of_memory(struct oom_control *oc);
 
 extern void exit_oom_victim(struct task_struct *tsk);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9c51b4d..f3482a2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1288,12 +1288,15 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
 
 		css_task_iter_start(&iter->css, &it);
 		while ((task = css_task_iter_next(&it)))
-			if (!oom_evaluate_task(&oc, task, totalpages))
+			if (!oom_evaluate_task(&oc, task, totalpages)) {
+				css_task_iter_end(&it);
+				mem_cgroup_iter_break(memcg, iter);
 				break;
+			}
 		css_task_iter_end(&it);
 	}
 
-	if (oc.chosen) {
+	if (oc.chosen && oc.chosen != (void *) -1UL) {
 		points = oc.chosen_points * 1000 / totalpages;
 		oom_kill_process(&oc, oc.chosen, points, totalpages,
 				 "Memory cgroup out of memory");
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index bce3ea2..f634bca 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -273,8 +273,15 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc,
 }
 #endif
 
-enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
-					struct task_struct *task)
+enum oom_scan_t {
+	OOM_SCAN_OK,		/* scan thread and find its badness */
+	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
+	OOM_SCAN_ABORT,		/* abort the iteration and return */
+	OOM_SCAN_SELECT,	/* always select this thread first */
+};
+
+static enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
+					       struct task_struct *task)
 {
 	if (oom_unkillable_task(task, NULL, oc->nodemask))
 		return OOM_SCAN_CONTINUE;
@@ -307,6 +314,9 @@ int oom_evaluate_task(struct oom_control *oc, struct task_struct *p, unsigned lo
 	case OOM_SCAN_CONTINUE:
 		return 1;
 	case OOM_SCAN_ABORT:
+		if (oc->chosen)
+			put_task_struct(oc->chosen);
+		oc->chosen = (void *) -1UL;
 		return 0;
 	case OOM_SCAN_OK:
 		break;
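
For completeness, here is a sketch of how I expect the caller in the global
path to treat the (void *) -1UL sentinel; the exact shape of out_of_memory()
after your rework is my assumption:

	/*
	 * Sketch only: (void *) -1UL means "abort, a victim is already
	 * exiting", so it must never be handed to oom_kill_process().
	 */
	if (oc->chosen == (void *) -1UL) {
		/* Give the exiting victim time instead of picking another task. */
		schedule_timeout_killable(1);
		return true;
	}
	if (oc->chosen)
		oom_kill_process(oc, oc->chosen, oc->chosen_points,
				 totalpages, "Out of memory");
	return !!oc->chosen;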
