linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Michal Hocko <mhocko@suse.cz>
To: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Jianyu Zhan <nasa4836@gmail.com>,
	bsingharora@gmail.com, kamezawa.hiroyu@jp.fujitsu.com,
	cgroups@vger.kernel.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org, akpm@linux-foundation.com
Subject: Re: [PATCH 2/2] mm/memcontrol.c: introduce helper mem_cgroup_zoneinfo_zone()
Date: Mon, 5 May 2014 17:31:44 +0200	[thread overview]
Message-ID: <20140505153144.GD32598@dhcp22.suse.cz> (raw)
In-Reply-To: <20140502232908.GQ23420@cmpxchg.org>

On Fri 02-05-14 19:29:08, Johannes Weiner wrote:
[...]
> From: Jianyu Zhan <nasa4836@gmail.com>
> Subject: [patch] mm: memcontrol: clean up memcg zoneinfo lookup
> 
> Memcg zoneinfo lookup sites have either the page, the zone, or the
> node id and zone index, but sites that only have the zone have to look
> up the node id and zone index themselves, whereas sites that already
> have those two integers use a function for a simple pointer chase.
> 
> Provide mem_cgroup_zone_zoneinfo() that takes a zone pointer and let
> sites that already have node id and zone index - all for-each-node,
> for-each-zone iterators - use &memcg->nodeinfo[nid]->zoneinfo[zid].
> 
> Rename page_cgroup_zoneinfo() to mem_cgroup_page_zoneinfo() to match.
> 
> Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>

OK, this looks better. The naming is more descriptive and it even
removes some code. Good. The open-coded zoneinfo dereference is not that
nice but I guess I can live with it.

Acked-by: Michal Hocko <mhocko@suse.cz>

> ---
>  mm/memcontrol.c | 89 +++++++++++++++++++++++++--------------------------------
>  1 file changed, 39 insertions(+), 50 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 29501f040568..83cbd5a0e62f 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -677,9 +677,11 @@ static void disarm_static_keys(struct mem_cgroup *memcg)
>  static void drain_all_stock_async(struct mem_cgroup *memcg);
>  
>  static struct mem_cgroup_per_zone *
> -mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
> +mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
>  {
> -	VM_BUG_ON((unsigned)nid >= nr_node_ids);
> +	int nid = zone_to_nid(zone);
> +	int zid = zone_idx(zone);
> +
>  	return &memcg->nodeinfo[nid]->zoneinfo[zid];
>  }
>  
> @@ -689,12 +691,12 @@ struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
>  }
>  
>  static struct mem_cgroup_per_zone *
> -page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
> +mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
>  {
>  	int nid = page_to_nid(page);
>  	int zid = page_zonenum(page);
>  
> -	return mem_cgroup_zoneinfo(memcg, nid, zid);
> +	return &memcg->nodeinfo[nid]->zoneinfo[zid];
>  }
>  
>  static struct mem_cgroup_tree_per_zone *
> @@ -773,16 +775,14 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
>  	unsigned long long excess;
>  	struct mem_cgroup_per_zone *mz;
>  	struct mem_cgroup_tree_per_zone *mctz;
> -	int nid = page_to_nid(page);
> -	int zid = page_zonenum(page);
> -	mctz = soft_limit_tree_from_page(page);
>  
> +	mctz = soft_limit_tree_from_page(page);
>  	/*
>  	 * Necessary to update all ancestors when hierarchy is used.
>  	 * because their event counter is not touched.
>  	 */
>  	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
> -		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
> +		mz = mem_cgroup_page_zoneinfo(memcg, page);
>  		excess = res_counter_soft_limit_excess(&memcg->res);
>  		/*
>  		 * We have to update the tree if mz is on RB-tree or
> @@ -805,14 +805,14 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
>  
>  static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
>  {
> -	int node, zone;
> -	struct mem_cgroup_per_zone *mz;
>  	struct mem_cgroup_tree_per_zone *mctz;
> +	struct mem_cgroup_per_zone *mz;
> +	int nid, zid;
>  
> -	for_each_node(node) {
> -		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
> -			mz = mem_cgroup_zoneinfo(memcg, node, zone);
> -			mctz = soft_limit_tree_node_zone(node, zone);
> +	for_each_node(nid) {
> +		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
> +			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
> +			mctz = soft_limit_tree_node_zone(nid, zid);
>  			mem_cgroup_remove_exceeded(memcg, mz, mctz);
>  		}
>  	}
> @@ -947,8 +947,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
>  	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
>  }
>  
> -unsigned long
> -mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
> +unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
>  {
>  	struct mem_cgroup_per_zone *mz;
>  
> @@ -956,46 +955,38 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
>  	return mz->lru_size[lru];
>  }
>  
> -static unsigned long
> -mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
> -			unsigned int lru_mask)
> -{
> -	struct mem_cgroup_per_zone *mz;
> -	enum lru_list lru;
> -	unsigned long ret = 0;
> -
> -	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
> -
> -	for_each_lru(lru) {
> -		if (BIT(lru) & lru_mask)
> -			ret += mz->lru_size[lru];
> -	}
> -	return ret;
> -}
> -
> -static unsigned long
> -mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
> -			int nid, unsigned int lru_mask)
> +static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
> +						  int nid,
> +						  unsigned int lru_mask)
>  {
> -	u64 total = 0;
> +	unsigned long nr = 0;
>  	int zid;
>  
> -	for (zid = 0; zid < MAX_NR_ZONES; zid++)
> -		total += mem_cgroup_zone_nr_lru_pages(memcg,
> -						nid, zid, lru_mask);
> +	VM_BUG_ON((unsigned)nid >= nr_node_ids);
> +
> +	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
> +		struct mem_cgroup_per_zone *mz;
> +		enum lru_list lru;
>  
> -	return total;
> +		for_each_lru(lru) {
> +			if (!(BIT(lru) & lru_mask))
> +				continue;
> +			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
> +			nr += mz->lru_size[lru];
> +		}
> +	}
> +	return nr;
>  }
>  
>  static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
>  			unsigned int lru_mask)
>  {
> +	unsigned long nr = 0;
>  	int nid;
> -	u64 total = 0;
>  
>  	for_each_node_state(nid, N_MEMORY)
> -		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
> -	return total;
> +		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
> +	return nr;
>  }
>  
>  static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
> @@ -1234,11 +1225,9 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
>  		int uninitialized_var(seq);
>  
>  		if (reclaim) {
> -			int nid = zone_to_nid(reclaim->zone);
> -			int zid = zone_idx(reclaim->zone);
>  			struct mem_cgroup_per_zone *mz;
>  
> -			mz = mem_cgroup_zoneinfo(root, nid, zid);
> +			mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
>  			iter = &mz->reclaim_iter[reclaim->priority];
>  			if (prev && reclaim->generation != iter->generation) {
>  				iter->last_visited = NULL;
> @@ -1345,7 +1334,7 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
>  		goto out;
>  	}
>  
> -	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
> +	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
>  	lruvec = &mz->lruvec;
>  out:
>  	/*
> @@ -1404,7 +1393,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
>  	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
>  		pc->mem_cgroup = memcg = root_mem_cgroup;
>  
> -	mz = page_cgroup_zoneinfo(memcg, page);
> +	mz = mem_cgroup_page_zoneinfo(memcg, page);
>  	lruvec = &mz->lruvec;
>  out:
>  	/*
> @@ -5412,7 +5401,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
>  
>  		for_each_online_node(nid)
>  			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
> -				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
> +				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
>  				rstat = &mz->lruvec.reclaim_stat;
>  
>  				recent_rotated[0] += rstat->recent_rotated[0];
> -- 
> 1.9.2
> 
> 

-- 
Michal Hocko
SUSE Labs

  parent reply	other threads:[~2014-05-05 15:31 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-04-18 23:01 [PATCH 2/2] mm/memcontrol.c: introduce helper mem_cgroup_zoneinfo_zone() Jianyu Zhan
2014-04-22  9:59 ` Michal Hocko
2014-04-28 15:04   ` Michal Hocko
2014-05-01 12:54     ` Johannes Weiner
2014-05-01 13:36       ` Johannes Weiner
2014-05-02 22:05       ` Andrew Morton
2014-05-02 23:29         ` Johannes Weiner
2014-05-02 23:31           ` Johannes Weiner
2014-05-05 15:35             ` Michal Hocko
2014-05-05 15:31           ` Michal Hocko [this message]
2014-05-05 21:36           ` Andrew Morton

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20140505153144.GD32598@dhcp22.suse.cz \
    --to=mhocko@suse.cz \
    --cc=akpm@linux-foundation.com \
    --cc=akpm@linux-foundation.org \
    --cc=bsingharora@gmail.com \
    --cc=cgroups@vger.kernel.org \
    --cc=hannes@cmpxchg.org \
    --cc=kamezawa.hiroyu@jp.fujitsu.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=nasa4836@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).