All of lore.kernel.org
 help / color / mirror / Atom feed
From: Andrea Arcangeli <aarcange@redhat.com>
To: linux-mm@kvack.org, Andrew Morton <akpm@linux-foundation.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
	Adam Litke <agl@us.ibm.com>, Avi Kivity <avi@redhat.com>,
	Izik Eidus <ieidus@redhat.com>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>,
	Nick Piggin <npiggin@suse.de>, Rik van Riel <riel@redhat.com>,
	Mel Gorman <mel@csn.ul.ie>, Dave Hansen <dave@linux.vnet.ibm.com>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Ingo Molnar <mingo@elte.hu>, Mike Travis <travis@sgi.com>,
	KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
	Christoph Lameter <cl@linux-foundation.org>,
	Chris Wright <chrisw@sous-sol.org>,
	bpicco@redhat.com,
	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
	Balbir Singh <balbir@linux.vnet.ibm.com>,
	Arnd Bergmann <arnd@arndb.de>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>,
	Chris Mason <chris.mason@oracle.com>
Subject: [PATCH 35 of 67] memcg compound
Date: Thu, 08 Apr 2010 03:51:18 +0200	[thread overview]
Message-ID: <2c3edecddfc77b5f7c22.1270691478@v2.random> (raw)
In-Reply-To: <patchbomb.1270691443@v2.random>

From: Andrea Arcangeli <aarcange@redhat.com>

Teach memcg to charge/uncharge compound pages.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
---

diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -4,6 +4,10 @@ NOTE: The Memory Resource Controller has
 to as the memory controller in this document. Do not confuse memory controller
 used here with the memory controller that is used in hardware.
 
+NOTE: When this documentation refers to PAGE_SIZE, it actually means
+the real page size of the page being accounted, which is bigger than
+PAGE_SIZE for compound pages.
+
 Salient features
 
 a. Enable control of Anonymous, Page Cache (mapped and unmapped) and
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1509,12 +1509,14 @@ static int __cpuinit memcg_stock_cpu_cal
  * oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
-			gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
+				   gfp_t gfp_mask,
+				   struct mem_cgroup **memcg, bool oom,
+				   int page_size)
 {
 	struct mem_cgroup *mem, *mem_over_limit;
 	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct res_counter *fail_res;
-	int csize = CHARGE_SIZE;
+	int csize = max(CHARGE_SIZE, (unsigned long) page_size);
 
 	/*
 	 * Unlike gloval-vm's OOM-kill, we're not in memory shortage
@@ -1549,8 +1551,9 @@ static int __mem_cgroup_try_charge(struc
 		int ret = 0;
 		unsigned long flags = 0;
 
-		if (consume_stock(mem))
-			goto done;
+		if (page_size == PAGE_SIZE)
+			if (consume_stock(mem))
+				goto done;
 
 		ret = res_counter_charge(&mem->res, csize, &fail_res);
 		if (likely(!ret)) {
@@ -1570,8 +1573,8 @@ static int __mem_cgroup_try_charge(struc
 									res);
 
 		/* reduce request size and retry */
-		if (csize > PAGE_SIZE) {
-			csize = PAGE_SIZE;
+		if (csize > page_size) {
+			csize = page_size;
 			continue;
 		}
 		if (!(gfp_mask & __GFP_WAIT))
@@ -1647,8 +1650,10 @@ static int __mem_cgroup_try_charge(struc
 			goto bypass;
 		}
 	}
-	if (csize > PAGE_SIZE)
-		refill_stock(mem, csize - PAGE_SIZE);
+	if (csize > page_size)
+		refill_stock(mem, csize - page_size);
+	if (page_size != PAGE_SIZE)
+		__css_get(&mem->css, page_size);
 done:
 	return 0;
 nomem:
@@ -1678,9 +1683,10 @@ static void __mem_cgroup_cancel_charge(s
 	/* we don't need css_put for root */
 }
 
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
+				     int page_size)
 {
-	__mem_cgroup_cancel_charge(mem, 1);
+	__mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
 }
 
 /*
@@ -1736,8 +1742,9 @@ struct mem_cgroup *try_get_mem_cgroup_fr
  */
 
 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
-				     struct page_cgroup *pc,
-				     enum charge_type ctype)
+				       struct page_cgroup *pc,
+				       enum charge_type ctype,
+				       int page_size)
 {
 	/* try_charge() can return NULL to *memcg, taking care of it. */
 	if (!mem)
@@ -1746,7 +1753,7 @@ static void __mem_cgroup_commit_charge(s
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
-		mem_cgroup_cancel_charge(mem);
+		mem_cgroup_cancel_charge(mem, page_size);
 		return;
 	}
 
@@ -1820,7 +1827,7 @@ static void __mem_cgroup_move_account(st
 	mem_cgroup_charge_statistics(from, pc, false);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
-		mem_cgroup_cancel_charge(from);
+		mem_cgroup_cancel_charge(from, PAGE_SIZE);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
@@ -1881,13 +1888,14 @@ static int mem_cgroup_move_parent(struct
 		goto put;
 
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false,
+				      PAGE_SIZE);
 	if (ret || !parent)
 		goto put_back;
 
 	ret = mem_cgroup_move_account(pc, child, parent, true);
 	if (ret)
-		mem_cgroup_cancel_charge(parent);
+		mem_cgroup_cancel_charge(parent, PAGE_SIZE);
 put_back:
 	putback_lru_page(page);
 put:
@@ -1909,6 +1917,10 @@ static int mem_cgroup_charge_common(stru
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc;
 	int ret;
+	int page_size = PAGE_SIZE;
+
+	if (PageTransHuge(page))
+		page_size <<= compound_order(page);
 
 	pc = lookup_page_cgroup(page);
 	/* can happen at boot */
@@ -1917,11 +1929,11 @@ static int mem_cgroup_charge_common(stru
 	prefetchw(pc);
 
 	mem = memcg;
-	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
 	if (ret || !mem)
 		return ret;
 
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, page_size);
 	return 0;
 }
 
@@ -1930,8 +1942,6 @@ int mem_cgroup_newpage_charge(struct pag
 {
 	if (mem_cgroup_disabled())
 		return 0;
-	if (PageCompound(page))
-		return 0;
 	/*
 	 * If already mapped, we don't have to account.
 	 * If page cache, page->mapping has address_space.
@@ -1944,7 +1954,7 @@ int mem_cgroup_newpage_charge(struct pag
 	if (unlikely(!mm))
 		mm = &init_mm;
 	return mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
+					MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
 static void
@@ -2037,14 +2047,14 @@ int mem_cgroup_try_charge_swapin(struct 
 	if (!mem)
 		goto charge_cur_mm;
 	*ptr = mem;
-	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
 	/* drop extra refcnt from tryget */
 	css_put(&mem->css);
 	return ret;
 charge_cur_mm:
 	if (unlikely(!mm))
 		mm = &init_mm;
-	return __mem_cgroup_try_charge(mm, mask, ptr, true);
+	return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
 }
 
 static void
@@ -2060,7 +2070,7 @@ __mem_cgroup_commit_charge_swapin(struct
 	cgroup_exclude_rmdir(&ptr->css);
 	pc = lookup_page_cgroup(page);
 	mem_cgroup_lru_del_before_commit_swapcache(page);
-	__mem_cgroup_commit_charge(ptr, pc, ctype);
+	__mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
 	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
@@ -2109,11 +2119,12 @@ void mem_cgroup_cancel_charge_swapin(str
 		return;
 	if (!mem)
 		return;
-	mem_cgroup_cancel_charge(mem);
+	mem_cgroup_cancel_charge(mem, PAGE_SIZE);
 }
 
 static void
-__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
+	      int page_size)
 {
 	struct memcg_batch_info *batch = NULL;
 	bool uncharge_memsw = true;
@@ -2146,14 +2157,14 @@ __do_uncharge(struct mem_cgroup *mem, co
 	if (batch->memcg != mem)
 		goto direct_uncharge;
 	/* remember freed charge and uncharge it later */
-	batch->bytes += PAGE_SIZE;
+	batch->bytes += page_size;
 	if (uncharge_memsw)
-		batch->memsw_bytes += PAGE_SIZE;
+		batch->memsw_bytes += page_size;
 	return;
 direct_uncharge:
-	res_counter_uncharge(&mem->res, PAGE_SIZE);
+	res_counter_uncharge(&mem->res, page_size);
 	if (uncharge_memsw)
-		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+		res_counter_uncharge(&mem->memsw, page_size);
 	return;
 }
 
@@ -2166,6 +2177,10 @@ __mem_cgroup_uncharge_common(struct page
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
 	struct mem_cgroup_per_zone *mz;
+	int page_size = PAGE_SIZE;
+
+	if (PageTransHuge(page))
+		page_size <<= compound_order(page);
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2205,7 +2220,7 @@ __mem_cgroup_uncharge_common(struct page
 	}
 
 	if (!mem_cgroup_is_root(mem))
-		__do_uncharge(mem, ctype);
+		__do_uncharge(mem, ctype, page_size);
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
 		mem_cgroup_swap_statistics(mem, true);
 	mem_cgroup_charge_statistics(mem, pc, false);
@@ -2418,6 +2433,7 @@ int mem_cgroup_prepare_migration(struct 
 	struct mem_cgroup *mem = NULL;
 	int ret = 0;
 
+	VM_BUG_ON(PageTransHuge(page));
 	if (mem_cgroup_disabled())
 		return 0;
 
@@ -2430,7 +2446,8 @@ int mem_cgroup_prepare_migration(struct 
 	unlock_page_cgroup(pc);
 
 	if (mem) {
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+					      PAGE_SIZE);
 		css_put(&mem->css);
 	}
 	*ptr = mem;
@@ -2473,7 +2490,7 @@ void mem_cgroup_end_migration(struct mem
 	 * __mem_cgroup_commit_charge() check PCG_USED bit of page_cgroup.
 	 * So, double-counting is effectively avoided.
 	 */
-	__mem_cgroup_commit_charge(mem, pc, ctype);
+	__mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
 
 	/*
 	 * Both of oldpage and newpage are still under lock_page().
@@ -3940,7 +3957,8 @@ one_by_one:
 			batch_count = PRECHARGE_COUNT_AT_ONCE;
 			cond_resched();
 		}
-		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+					      PAGE_SIZE);
 		if (ret || !mem)
 			/* mem_cgroup_clear_mc() will do uncharge later */
 			return -ENOMEM;
@@ -4055,6 +4073,7 @@ static int mem_cgroup_count_precharge_pt
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4201,6 +4220,7 @@ static int mem_cgroup_move_charge_pte_ra
 	spinlock_t *ptl;
 
 retry:
+	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

  parent reply	other threads:[~2010-04-08  2:57 UTC|newest]

Thread overview: 95+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2010-04-08  1:50 [PATCH 00 of 67] Transparent Hugepage Support #18 Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 01 of 67] define MADV_HUGEPAGE Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 02 of 67] compound_lock Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 03 of 67] alter compound get_page/put_page Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 04 of 67] update futex compound knowledge Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 05 of 67] fix bad_page to show the real reason the page is bad Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 06 of 67] clear compound mapping Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 07 of 67] add native_set_pmd_at Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 08 of 67] add pmd paravirt ops Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 09 of 67] no paravirt version of pmd ops Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 10 of 67] export maybe_mkwrite Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 11 of 67] comment reminder in destroy_compound_page Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 12 of 67] config_transparent_hugepage Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 13 of 67] special pmd_trans_* functions Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 14 of 67] add pmd mangling generic functions Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 15 of 67] add pmd mangling functions to x86 Andrea Arcangeli
2010-04-08  1:50 ` [PATCH 16 of 67] bail out gup_fast on splitting pmd Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 17 of 67] pte alloc trans splitting Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 18 of 67] add pmd mmu_notifier helpers Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 19 of 67] clear page compound Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 20 of 67] add pmd_huge_pte to mm_struct Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 21 of 67] This fixes some minor issues that bugged me while going over the code: Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 22 of 67] Split out functions to handle hugetlb ranges, pte ranges and unmapped Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 23 of 67] Instead of passing a start address and a number of pages into the helper Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 24 of 67] Do page table walks with the well-known nested loops we use in several Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 25 of 67] split_huge_page_mm/vma Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 26 of 67] split_huge_page paging Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 27 of 67] clear_copy_huge_page Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 28 of 67] kvm mmu transparent hugepage support Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 29 of 67] _GFP_NO_KSWAPD Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 30 of 67] don't alloc harder for gfp nomemalloc even if nowait Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 31 of 67] transparent hugepage core Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 32 of 67] verify pmd_trans_huge isn't leaking Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 33 of 67] madvise(MADV_HUGEPAGE) Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 34 of 67] pmd_trans_huge migrate bugcheck Andrea Arcangeli
2010-04-08  1:51 ` Andrea Arcangeli [this message]
2010-04-08  1:51 ` [PATCH 36 of 67] memcg huge memory Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 37 of 67] transparent hugepage vmstat Andrea Arcangeli
2010-04-08 11:53   ` Avi Kivity
2010-04-08  1:51 ` [PATCH 38 of 67] khugepaged Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 39 of 67] don't leave orphaned swap cache after ksm merging Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 40 of 67] skip transhuge pages in ksm for now Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 41 of 67] remove PG_buddy Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 42 of 67] add x86 32bit support Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 43 of 67] mincore transparent hugepage support Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 44 of 67] add pmd_modify Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 45 of 67] mprotect: pass vma down to page table walkers Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 46 of 67] mprotect: transparent huge page support Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 47 of 67] set recommended min free kbytes Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 48 of 67] remove lumpy_reclaim Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 49 of 67] Take a reference to the anon_vma before migrating Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 50 of 67] Do not try to migrate unmapped anonymous pages Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 51 of 67] Share the anon_vma ref counts between KSM and page migration Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 52 of 67] Allow CONFIG_MIGRATION to be set without CONFIG_NUMA or memory hot-remove Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 53 of 67] Export unusable free space index via /proc/unusable_index Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 54 of 67] Export fragmentation index via /proc/extfrag_index Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 55 of 67] Move definition for LRU isolation modes to a header Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 56 of 67] Memory compaction core Andrea Arcangeli
2010-04-08 16:18   ` Johannes Weiner
2010-04-08 16:46     ` Andrea Arcangeli
2010-04-08 17:09       ` Andrea Arcangeli
2010-04-08 17:14         ` Andrea Arcangeli
2010-04-08 17:56           ` Johannes Weiner
2010-04-08 17:58             ` Andrea Arcangeli
2010-04-08 18:48               ` Johannes Weiner
2010-04-08 21:23                 ` Andrea Arcangeli
2010-04-08 21:32                   ` Andrea Arcangeli
2010-04-09 10:51                   ` Mel Gorman
2010-04-09 15:37                     ` Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 57 of 67] Add /proc trigger for memory compaction Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 58 of 67] Add /sys trigger for per-node " Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 59 of 67] Direct compact when a high-order allocation fails Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 60 of 67] Add a tunable that decides when memory should be compacted and when it should be reclaimed Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 61 of 67] Allow the migration of PageSwapCache pages Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 62 of 67] do not display compaction-related stats when !CONFIG_COMPACTION Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 63 of 67] disable migrate_prep() Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 64 of 67] page buddy can go away before reading page_order Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 65 of 67] select CONFIG_COMPACTION if TRANSPARENT_HUGEPAGE enabled Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 66 of 67] enable direct defrag Andrea Arcangeli
2010-04-08  1:51 ` [PATCH 67 of 67] memcg fix prepare migration Andrea Arcangeli
2010-04-08  3:57   ` Daisuke Nishimura
2010-04-13  1:29     ` Andrew Morton
2010-04-09  8:13   ` KAMEZAWA Hiroyuki
2010-04-08  9:39 ` [PATCH 00 of 67] Transparent Hugepage Support #18 Avi Kivity
2010-04-08 11:44   ` Avi Kivity
2010-04-08 15:23     ` Andrea Arcangeli
2010-04-08 15:27       ` Avi Kivity
2010-04-08 16:02         ` Andrea Arcangeli
2010-04-08 15:32       ` Christoph Lameter
2010-04-08 23:17         ` Andrea Arcangeli
2010-04-09  8:45     ` Avi Kivity
2010-04-09 15:50       ` Andrea Arcangeli
2010-04-09 17:44         ` Avi Kivity
2010-04-09  2:05 ` Transparent Hugepage Support #19 Andrea Arcangeli
2010-04-09 15:43   ` Andrea Arcangeli

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=2c3edecddfc77b5f7c22.1270691478@v2.random \
    --to=aarcange@redhat.com \
    --cc=agl@us.ibm.com \
    --cc=akpm@linux-foundation.org \
    --cc=arnd@arndb.de \
    --cc=avi@redhat.com \
    --cc=balbir@linux.vnet.ibm.com \
    --cc=benh@kernel.crashing.org \
    --cc=bpicco@redhat.com \
    --cc=chris.mason@oracle.com \
    --cc=chrisw@sous-sol.org \
    --cc=cl@linux-foundation.org \
    --cc=dave@linux.vnet.ibm.com \
    --cc=hannes@cmpxchg.org \
    --cc=hugh.dickins@tiscali.co.uk \
    --cc=ieidus@redhat.com \
    --cc=kamezawa.hiroyu@jp.fujitsu.com \
    --cc=kosaki.motohiro@jp.fujitsu.com \
    --cc=linux-mm@kvack.org \
    --cc=mel@csn.ul.ie \
    --cc=mingo@elte.hu \
    --cc=mst@redhat.com \
    --cc=mtosatti@redhat.com \
    --cc=nishimura@mxp.nes.nec.co.jp \
    --cc=npiggin@suse.de \
    --cc=peterz@infradead.org \
    --cc=riel@redhat.com \
    --cc=travis@sgi.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.