Date: Tue, 5 Apr 2016 14:46:03 -0700 (PDT)
From: Hugh Dickins
To: Andrew Morton
cc: "Kirill A. Shutemov", Andrea Arcangeli, Andres Lagar-Cavilla,
    Yang Shi, Ning Qu, Johannes Weiner, Michal Hocko,
    linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 19/31] huge tmpfs: mem_cgroup shmem_pmdmapped accounting

From: Andres Lagar-Cavilla

Grep now for shmem_pmdmapped in memory.stat (and also for "total_..." in
a hierarchical setting).  This metric allows easy checking, on a
per-cgroup basis, of how much page team memory is hugely mapped (at
least once) out there.

The metric is counted towards the cgroup owning the page (unlike in an
event such as THP split), because the team page may be mapped hugely
for the first time via a shared map in some other process.

Moved up mem_cgroup_move_account()'s PageWriteback block: that movement
is irrelevant to this patch, but lets us concentrate better on the
PageTeam locking issues which follow in the next patch.

Signed-off-by: Andres Lagar-Cavilla
Signed-off-by: Hugh Dickins
---
 include/linux/memcontrol.h |    2 ++
 include/linux/pageteam.h   |   16 ++++++++++++++++
 mm/huge_memory.c           |    4 ++++
 mm/memcontrol.c            |   35 ++++++++++++++++++++++++++---------
 4 files changed, 48 insertions(+), 9 deletions(-)

--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -50,6 +50,8 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_DIRTY,		/* # of dirty pages in page cache */
 	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
+	/* # of pages charged as hugely mapped teams */
+	MEM_CGROUP_STAT_SHMEM_PMDMAPPED,
 	MEM_CGROUP_STAT_NSTATS,
 	/* default hierarchy stats */
 	MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
--- a/include/linux/pageteam.h
+++ b/include/linux/pageteam.h
@@ -135,6 +135,22 @@ static inline bool dec_team_pmd_mapped(s
 }
 
 /*
+ * Supplies those values which mem_cgroup_move_account()
+ * needs to maintain memcg's huge tmpfs stats correctly.
+ */
+static inline void count_team_pmd_mapped(struct page *head, int *file_mapped,
+					 bool *pmd_mapped)
+{
+	long team_usage;
+
+	*file_mapped = 1;
+	team_usage = atomic_long_read(&head->team_usage);
+	*pmd_mapped = team_usage >= TEAM_PMD_MAPPED;
+	if (*pmd_mapped)
+		*file_mapped = HPAGE_PMD_NR - team_pte_count(team_usage);
+}
+
+/*
  * Returns true if this pte mapping is of a non-team page, or of a team page not
  * covered by an existing huge pmd mapping: whereupon stats need to be updated.
  * Only called when mapcount goes up from 0 to 1 i.e. _mapcount from -1 to 0.
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3514,6 +3514,8 @@ static void page_add_team_rmap(struct pa
 		__mod_zone_page_state(zone, NR_FILE_MAPPED, nr_pages);
 		mem_cgroup_update_page_stat(page,
 				MEM_CGROUP_STAT_FILE_MAPPED, nr_pages);
+		mem_cgroup_update_page_stat(page,
+				MEM_CGROUP_STAT_SHMEM_PMDMAPPED, HPAGE_PMD_NR);
 	}
 	unlock_page_memcg(page);
 }
@@ -3533,6 +3535,8 @@ static void page_remove_team_rmap(struct
 		__mod_zone_page_state(zone, NR_FILE_MAPPED, -nr_pages);
 		mem_cgroup_update_page_stat(page,
 				MEM_CGROUP_STAT_FILE_MAPPED, -nr_pages);
+		mem_cgroup_update_page_stat(page,
+				MEM_CGROUP_STAT_SHMEM_PMDMAPPED, -HPAGE_PMD_NR);
 	}
 	unlock_page_memcg(page);
 }
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -37,6 +37,7 @@
 #include
 #include
 #include
+#include <linux/pageteam.h>
 #include
 #include
 #include
@@ -106,6 +107,7 @@ static const char * const mem_cgroup_sta
 	"dirty",
 	"writeback",
 	"swap",
+	"shmem_pmdmapped",
 };
 
 static const char * const mem_cgroup_events_names[] = {
@@ -4447,7 +4449,8 @@ static int mem_cgroup_move_account(struc
 				   struct mem_cgroup *to)
 {
 	unsigned long flags;
-	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
+	int nr_pages = compound ? hpage_nr_pages(page) : 1;
+	int file_mapped = 1;
 	int ret;
 	bool anon;
 
@@ -4471,10 +4474,10 @@ static int mem_cgroup_move_account(struc
 
 	spin_lock_irqsave(&from->move_lock, flags);
 
-	if (!anon && page_mapped(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+	if (PageWriteback(page)) {
+		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
 			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
 			       nr_pages);
 	}
 
@@ -4494,11 +4497,25 @@ static int mem_cgroup_move_account(struc
 		}
 	}
 
-	if (PageWriteback(page)) {
-		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
-		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
-			       nr_pages);
+	if (!anon && PageTeam(page)) {
+		if (page == team_head(page)) {
+			bool pmd_mapped;
+
+			count_team_pmd_mapped(page, &file_mapped, &pmd_mapped);
+			if (pmd_mapped) {
+				__this_cpu_sub(from->stat->count[
+				MEM_CGROUP_STAT_SHMEM_PMDMAPPED], HPAGE_PMD_NR);
+				__this_cpu_add(to->stat->count[
+				MEM_CGROUP_STAT_SHMEM_PMDMAPPED], HPAGE_PMD_NR);
+			}
+		}
+	}
+
+	if (!anon && page_mapped(page)) {
+		__this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+			       file_mapped);
+		__this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
+			       file_mapped);
 	}
 
 	/*
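
Not part of the patch: a usage illustration for the memory.stat note in
the description above.  This is a minimal userspace sketch under the
assumption that the v1 memory controller is mounted at
/sys/fs/cgroup/memory (that mount point and the optional cgroup-relative
path argument are illustrative, not defined by this series): it prints a
cgroup's shmem_pmdmapped line, and total_shmem_pmdmapped where hierarchy
totals are reported.

	/* Illustrative only, not from this series. */
	#include <stdio.h>
	#include <string.h>

	int main(int argc, char **argv)
	{
		char path[512], line[256];
		FILE *f;

		/* argv[1]: cgroup directory relative to the memory mount */
		snprintf(path, sizeof(path),
			 "/sys/fs/cgroup/memory/%s/memory.stat",
			 argc > 1 ? argv[1] : "");
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			return 1;
		}
		while (fgets(line, sizeof(line), f)) {
			/* matches shmem_pmdmapped and total_shmem_pmdmapped */
			if (strstr(line, "shmem_pmdmapped"))
				fputs(line, stdout);
		}
		fclose(f);
		return 0;
	}

As with the other memory.stat counters, the per-cgroup line reflects only
pages charged to that cgroup, while the "total_" line aggregates the
hierarchy below it.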