From mboxrd@z Thu Jan 1 00:00:00 1970
From: Greg Thelen
Subject: [PATCH v4 07/11] memcg: add kernel calls for memcg dirty page stats
Date: Fri, 29 Oct 2010 00:09:10 -0700
Message-ID: <1288336154-23256-8-git-send-email-gthelen@google.com>
References: <1288336154-23256-1-git-send-email-gthelen@google.com>
Return-path:
In-Reply-To: <1288336154-23256-1-git-send-email-gthelen@google.com>
Sender: owner-linux-mm@kvack.org
To: Andrew Morton
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org, containers@lists.osdl.org,
    Andrea Righi, Balbir Singh, KAMEZAWA Hiroyuki, Daisuke Nishimura,
    Minchan Kim, Ciju Rajan K, David Rientjes, Wu Fengguang, Greg Thelen
List-Id: containers.vger.kernel.org

Add calls into memcg dirty page accounting.  Notify memcg when pages
transition between clean, file dirty, writeback, and unstable nfs.  This
allows the memory controller to maintain an accurate view of the amount of
its memory that is dirty.

Signed-off-by: Greg Thelen
Signed-off-by: Andrea Righi
Acked-by: KAMEZAWA Hiroyuki
Reviewed-by: Daisuke Nishimura
---
 fs/nfs/write.c      |    4 ++++
 mm/filemap.c        |    1 +
 mm/page-writeback.c |    4 ++++
 mm/truncate.c       |    1 +
 4 files changed, 10 insertions(+), 0 deletions(-)
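
For reference, a minimal sketch of the pattern each hook site follows
(illustrative only; it assumes the mem_cgroup_{inc,dec}_page_stat() interface
and the MEMCG_NR_FILE_* items introduced earlier in this series), mirroring
the account_page_dirtied() hunk below:

        /* dirtying a file page: update memcg, zone and bdi counters together */
        if (mapping_cap_account_dirty(mapping)) {
                mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_DIRTY);        /* per-memcg (new) */
                __inc_zone_page_state(page, NR_FILE_DIRTY);                 /* per-zone (existing) */
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); /* per-bdi (existing) */
        }

Every path that clears the corresponding page state (page cache removal,
truncate, writeback completion, NFS commit) performs the matching
mem_cgroup_dec_page_stat() next to the existing zone/bdi decrement, so the
per-memcg counters stay consistent with the global ones.
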
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 4c14c17..a3c39f7 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -450,6 +450,7 @@ nfs_mark_request_commit(struct nfs_page *req)
                         NFS_PAGE_TAG_COMMIT);
         nfsi->ncommit++;
         spin_unlock(&inode->i_lock);
+        mem_cgroup_inc_page_stat(req->wb_page, MEMCG_NR_FILE_UNSTABLE_NFS);
         inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
         inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
@@ -461,6 +462,7 @@ nfs_clear_request_commit(struct nfs_page *req)
         struct page *page = req->wb_page;
 
         if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
+                mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_UNSTABLE_NFS);
                 dec_zone_page_state(page, NR_UNSTABLE_NFS);
                 dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
                 return 1;
@@ -1316,6 +1318,8 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
                 req = nfs_list_entry(head->next);
                 nfs_list_remove_request(req);
                 nfs_mark_request_commit(req);
+                mem_cgroup_dec_page_stat(req->wb_page,
+                                         MEMCG_NR_FILE_UNSTABLE_NFS);
                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
                                 BDI_RECLAIMABLE);
diff --git a/mm/filemap.c b/mm/filemap.c
index 49b2d2e..f6bd6f2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -146,6 +146,7 @@ void __remove_from_page_cache(struct page *page)
          * having removed the page entirely.
          */
         if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+                mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_DIRTY);
                 dec_zone_page_state(page, NR_FILE_DIRTY);
                 dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
         }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 722bd61..b3bb2fb 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1118,6 +1118,7 @@ int __set_page_dirty_no_writeback(struct page *page)
 void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
         if (mapping_cap_account_dirty(mapping)) {
+                mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_DIRTY);
                 __inc_zone_page_state(page, NR_FILE_DIRTY);
                 __inc_zone_page_state(page, NR_DIRTIED);
                 __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
@@ -1307,6 +1308,7 @@ int clear_page_dirty_for_io(struct page *page)
                  * for more comments.
                  */
                 if (TestClearPageDirty(page)) {
+                        mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_DIRTY);
                         dec_zone_page_state(page, NR_FILE_DIRTY);
                         dec_bdi_stat(mapping->backing_dev_info,
                                         BDI_RECLAIMABLE);
@@ -1337,6 +1339,7 @@ int test_clear_page_writeback(struct page *page)
                                 __dec_bdi_stat(bdi, BDI_WRITEBACK);
                                 __bdi_writeout_inc(bdi);
                         }
+                        mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_WRITEBACK);
                 }
                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
         } else {
@@ -1364,6 +1367,7 @@ int test_set_page_writeback(struct page *page)
                                                 PAGECACHE_TAG_WRITEBACK);
                         if (bdi_cap_account_writeback(bdi))
                                 __inc_bdi_stat(bdi, BDI_WRITEBACK);
+                        mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_WRITEBACK);
                 }
                 if (!PageDirty(page))
                         radix_tree_tag_clear(&mapping->page_tree,
diff --git a/mm/truncate.c b/mm/truncate.c
index cd94607..54cca83 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -76,6 +76,7 @@ void cancel_dirty_page(struct page *page, unsigned int account_size)
         if (TestClearPageDirty(page)) {
                 struct address_space *mapping = page->mapping;
                 if (mapping && mapping_cap_account_dirty(mapping)) {
+                        mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_DIRTY);
                         dec_zone_page_state(page, NR_FILE_DIRTY);
                         dec_bdi_stat(mapping->backing_dev_info,
                                         BDI_RECLAIMABLE);
-- 
1.7.3.1