From: Andrea Arcangeli <aarcange@redhat.com>
To: linux-mm@kvack.org,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-kernel@vger.kernel.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
	Adam Litke <agl@us.ibm.com>, Avi Kivity <avi@redhat.com>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>,
	Rik van Riel <riel@redhat.com>, Mel Gorman <mel@csn.ul.ie>,
	Dave Hansen <dave@linux.vnet.ibm.com>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Ingo Molnar <mingo@elte.hu>, Mike Travis <travis@sgi.com>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Christoph Lameter <cl@linux-foundation.org>,
	Chris Wright <chrisw@sous-sol.org>,
	bpicco@redhat.com,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>,
	Chris Mason <chris.mason@oracle.com>,
	Borislav Petkov <bp@alien8.de>
Subject: [PATCH 36 of 66] memcg compound
Date: Wed, 03 Nov 2010 16:28:11 +0100	[thread overview]
Message-ID: <495ffee2d60adab4d18b.1288798091@v2.random> (raw)
In-Reply-To: <patchbomb.1288798055@v2.random>

From: Andrea Arcangeli <aarcange@redhat.com>

Teach memcg to charge/uncharge compound pages.

A transparent hugepage is now accounted at its full compound size
instead of being skipped: mem_cgroup_newpage_charge() no longer bails
out on PageCompound pages, and an explicit page_size parameter is
threaded through __mem_cgroup_try_charge(), __mem_cgroup_commit_charge(),
mem_cgroup_cancel_charge() and __do_uncharge(). Huge pages bypass the
per-cpu charge stock, which hands out charges in PAGE_SIZE units, and
new VM_BUG_ON checks assert that migration never sees a transhuge page
and that the move-charge pte walks never run on a huge pmd.
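
As a concrete illustration (not part of the patch), the sizing idiom
repeated at each charge/uncharge site can be exercised from user
space. charge_size() below is a hypothetical stand-in for the
open-coded PageTransHuge()/compound_order() computation; the constants
assume x86-64 with 4KiB base pages and 2MiB huge pages:

  #include <stdio.h>

  #define PAGE_SHIFT      12
  #define PAGE_SIZE       (1UL << PAGE_SHIFT)  /* 4 KiB base page */
  #define HPAGE_PMD_ORDER 9                    /* 2 MiB THP = 512 base pages */

  /*
   * Mirrors "if (PageTransHuge(page)) page_size <<= compound_order(page)":
   * a compound page is charged once, for its full size in bytes.
   */
  static unsigned long charge_size(int trans_huge, int order)
  {
          unsigned long page_size = PAGE_SIZE;

          if (trans_huge)
                  page_size <<= order;
          return page_size;
  }

  int main(void)
  {
          unsigned long sz = charge_size(1, HPAGE_PMD_ORDER);

          /*
           * Prints "2097152 bytes = 512 pages"; 512 is exactly the count
           * mem_cgroup_cancel_charge() derives as page_size >> PAGE_SHIFT.
           */
          printf("%lu bytes = %lu pages\n", sz, sz >> PAGE_SHIFT);
          return 0;
  }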

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
---

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1019,6 +1019,10 @@ mem_cgroup_get_reclaim_stat_from_page(st
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup_per_zone *mz;
+	int page_size = PAGE_SIZE;
+
+	if (PageTransHuge(page))
+		page_size <<= compound_order(page);
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -1879,12 +1883,14 @@ static int __mem_cgroup_do_charge(struct
  * oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
-		gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
+				   gfp_t gfp_mask,
+				   struct mem_cgroup **memcg, bool oom,
+				   int page_size)
 {
 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup *mem = NULL;
 	int ret;
-	int csize = CHARGE_SIZE;
+	int csize = max(CHARGE_SIZE, (unsigned long) page_size);
 
 	/*
 	 * Unlike gloval-vm's OOM-kill, we're not in memory shortage
@@ -1909,7 +1915,7 @@ again:
 		VM_BUG_ON(css_is_removed(&mem->css));
 		if (mem_cgroup_is_root(mem))
 			goto done;
-		if (consume_stock(mem))
+		if (page_size == PAGE_SIZE && consume_stock(mem))
 			goto done;
 		css_get(&mem->css);
 	} else {
@@ -1933,7 +1939,7 @@ again:
 			rcu_read_unlock();
 			goto done;
 		}
-		if (consume_stock(mem)) {
+		if (page_size == PAGE_SIZE && consume_stock(mem)) {
 			/*
 			 * It seems dagerous to access memcg without css_get().
 			 * But considering how consume_stok works, it's not
@@ -1974,7 +1980,7 @@ again:
 		case CHARGE_OK:
 			break;
 		case CHARGE_RETRY: /* not in OOM situation but retry */
-			csize = PAGE_SIZE;
+			csize = page_size;
 			css_put(&mem->css);
 			mem = NULL;
 			goto again;
@@ -1995,8 +2001,8 @@ again:
 		}
 	} while (ret != CHARGE_OK);
 
-	if (csize > PAGE_SIZE)
-		refill_stock(mem, csize - PAGE_SIZE);
+	if (csize > page_size)
+		refill_stock(mem, csize - page_size);
 	css_put(&mem->css);
 done:
 	*memcg = mem;
@@ -2024,9 +2030,10 @@ static void __mem_cgroup_cancel_charge(s
 	}
 }
 
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
+				     int page_size)
 {
-	__mem_cgroup_cancel_charge(mem, 1);
+	__mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
 }
 
 /*
@@ -2082,8 +2089,9 @@ struct mem_cgroup *try_get_mem_cgroup_fr
  */
 
 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
-				     struct page_cgroup *pc,
-				     enum charge_type ctype)
+				       struct page_cgroup *pc,
+				       enum charge_type ctype,
+				       int page_size)
 {
 	/* try_charge() can return NULL to *memcg, taking care of it. */
 	if (!mem)
@@ -2092,7 +2100,7 @@ static void __mem_cgroup_commit_charge(s
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
-		mem_cgroup_cancel_charge(mem);
+		mem_cgroup_cancel_charge(mem, page_size);
 		return;
 	}
 
@@ -2166,7 +2174,7 @@ static void __mem_cgroup_move_account(st
 	mem_cgroup_charge_statistics(from, pc, false);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
-		mem_cgroup_cancel_charge(from);
+		mem_cgroup_cancel_charge(from, PAGE_SIZE);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
@@ -2227,13 +2235,14 @@ static int mem_cgroup_move_parent(struct
 		goto put;
 
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false,
+				      PAGE_SIZE);
 	if (ret || !parent)
 		goto put_back;
 
 	ret = mem_cgroup_move_account(pc, child, parent, true);
 	if (ret)
-		mem_cgroup_cancel_charge(parent);
+		mem_cgroup_cancel_charge(parent, PAGE_SIZE);
 put_back:
 	putback_lru_page(page);
 put:
@@ -2254,6 +2263,10 @@ static int mem_cgroup_charge_common(stru
 	struct mem_cgroup *mem = NULL;
 	struct page_cgroup *pc;
 	int ret;
+	int page_size = PAGE_SIZE;
+
+	if (PageTransHuge(page))
+		page_size <<= compound_order(page);
 
 	pc = lookup_page_cgroup(page);
 	/* can happen at boot */
@@ -2261,11 +2274,11 @@ static int mem_cgroup_charge_common(stru
 		return 0;
 	prefetchw(pc);
 
-	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
 	if (ret || !mem)
 		return ret;
 
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, page_size);
 	return 0;
 }
 
@@ -2274,8 +2287,6 @@ int mem_cgroup_newpage_charge(struct pag
 {
 	if (mem_cgroup_disabled())
 		return 0;
-	if (PageCompound(page))
-		return 0;
 	/*
 	 * If already mapped, we don't have to account.
 	 * If page cache, page->mapping has address_space.
@@ -2381,13 +2392,13 @@ int mem_cgroup_try_charge_swapin(struct 
 	if (!mem)
 		goto charge_cur_mm;
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
 	css_put(&mem->css);
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, ptr, true);
+	return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
 }
 
 static void
@@ -2403,7 +2414,7 @@ __mem_cgroup_commit_charge_swapin(struct
 	cgroup_exclude_rmdir(&ptr->css);
 	pc = lookup_page_cgroup(page);
 	mem_cgroup_lru_del_before_commit_swapcache(page);
-	__mem_cgroup_commit_charge(ptr, pc, ctype);
+	__mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
 	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
@@ -2452,11 +2463,12 @@ void mem_cgroup_cancel_charge_swapin(str
 		return;
 	if (!mem)
 		return;
-	mem_cgroup_cancel_charge(mem);
+	mem_cgroup_cancel_charge(mem, PAGE_SIZE);
 }
 
 static void
-__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
+	      int page_size)
 {
 	struct memcg_batch_info *batch = NULL;
 	bool uncharge_memsw = true;
@@ -2491,14 +2503,14 @@ __do_uncharge(struct mem_cgroup *mem, co
 	if (batch->memcg != mem)
 		goto direct_uncharge;
 	/* remember freed charge and uncharge it later */
-	batch->bytes += PAGE_SIZE;
+	batch->bytes += page_size;
 	if (uncharge_memsw)
-		batch->memsw_bytes += PAGE_SIZE;
+		batch->memsw_bytes += page_size;
 	return;
 direct_uncharge:
-	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	res_counter_uncharge(&mem->res, page_size);
 	if (uncharge_memsw)
-		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+		res_counter_uncharge(&mem->memsw, page_size);
 	if (unlikely(batch->memcg != mem))
 		memcg_oom_recover(mem);
 	return;
@@ -2512,6 +2524,7 @@ __mem_cgroup_uncharge_common(struct page
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
+	int page_size = PAGE_SIZE;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2519,6 +2532,9 @@ __mem_cgroup_uncharge_common(struct page
 	if (PageSwapCache(page))
 		return NULL;
 
+	if (PageTransHuge(page))
+		page_size <<= compound_order(page);
+
 	/*
 	 * Check if our page_cgroup is valid
 	 */
@@ -2572,7 +2588,7 @@ __mem_cgroup_uncharge_common(struct page
 		mem_cgroup_get(mem);
 	}
 	if (!mem_cgroup_is_root(mem))
-		__do_uncharge(mem, ctype);
+		__do_uncharge(mem, ctype, page_size);
 
 	return mem;
 
@@ -2767,6 +2783,7 @@ int mem_cgroup_prepare_migration(struct 
 	enum charge_type ctype;
 	int ret = 0;
 
+	VM_BUG_ON(PageTransHuge(page));
 	if (mem_cgroup_disabled())
 		return 0;
 
@@ -2816,7 +2833,7 @@ int mem_cgroup_prepare_migration(struct 
 		return 0;
 
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
+	ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE);
 	css_put(&mem->css);/* drop extra refcnt */
 	if (ret || *ptr == NULL) {
 		if (PageAnon(page)) {
@@ -2843,7 +2860,7 @@ int mem_cgroup_prepare_migration(struct 
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
 	return ret;
 }
 
@@ -4452,7 +4469,8 @@ one_by_one:
 			batch_count = PRECHARGE_COUNT_AT_ONCE;
 			cond_resched();
 		}
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+					      PAGE_SIZE);
 		if (ret || !mem)
 			/* mem_cgroup_clear_mc() will do uncharge later */
 			return -ENOMEM;
@@ -4614,6 +4632,7 @@ static int mem_cgroup_count_precharge_pt
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4765,6 +4784,7 @@ static int mem_cgroup_move_charge_pte_ra
 	spinlock_t *ptl;
 
 retry:
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
