From: Tejun Heo <tj@kernel.org>
To: Nick Piggin <npiggin@suse.de>, Tony Luck <tony.luck@intel.com>,
	Fenghua Yu <fenghua.yu@intel.com>,
	linux-ia64 <linux-ia64@vger.kernel.org>,
	Ingo Molnar <mingo@redhat.com>,
	Rusty Russell <rusty@rustcorp.com.au>,
	Christoph Lameter <cl@linux-foundation.org>,
	linux-kernel@vger.kernel.org
Cc: Tejun Heo <tj@kernel.org>, Tony Luck <tony.luck@intel.com>,
	Fenghua Yu <fenghua.yu@intel.com>,
	linux-ia64 <linux-ia64@vger.kernel.org>
Subject: [PATCH 4/5] ia64: convert to dynamic percpu allocator
Date: Wed, 23 Sep 2009 14:06:21 +0900
Message-ID: <1253682382-24740-5-git-send-email-tj@kernel.org>
In-Reply-To: <1253682382-24740-1-git-send-email-tj@kernel.org>

Unlike other archs, ia64 reserves space for percpu areas during early
memory initialization.  These areas occupy a contiguous region indexed
by cpu number under the contiguous memory model, or are grouped by
node under the discontiguous memory model.
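
For illustration, on a hypothetical machine with four possible cpus,
cpus 0-1 on node 0 and cpus 2-3 on node 1 (each cell being one
PERCPU_PAGE_SIZE unit):

	contig:    | cpu0 | cpu1 | cpu2 | cpu3 |
	discontig: node 0: | cpu0 | cpu1 |    node 1: | cpu2 | cpu3 |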

As allocation and initialization are done by the arch code, all that
setup_per_cpu_areas() needs to do is communicate the determined
layout to the percpu allocator.  This patch implements
setup_per_cpu_areas() for both contig and discontig memory models and
drops HAVE_LEGACY_PER_CPU_AREA.
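
For reference, the layout is handed over in a struct pcpu_alloc_info;
an abridged sketch of the 2.6.32-era definition (see
include/linux/percpu.h for the full version):

	struct pcpu_group_info {
		int		nr_units;	/* units in this group */
		unsigned long	base_offset;	/* offset from chunk base */
		unsigned int	*cpu_map;	/* unit -> cpu map */
	};

	struct pcpu_alloc_info {
		size_t	static_size;	/* static percpu variables */
		size_t	reserved_size;	/* e.g. module percpu space */
		size_t	dyn_size;	/* dynamically allocatable space */
		size_t	unit_size;	/* size of each cpu's unit */
		size_t	atom_size;	/* allocation granularity */
		size_t	alloc_size;	/* allocation size */
		int	nr_groups;
		struct pcpu_group_info	groups[];
	};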

Please note that for the contig model, the allocation itself is
modified only to allocate for possible cpus instead of NR_CPUS.  As
the dynamic percpu allocator can handle a non-direct cpu-to-unit
mapping, there is no reason to allocate memory for cpus which aren't
possible.
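
Concretely, each possible cpu gets one PERCPU_PAGE_SIZE unit split
into static, reserved and dynamic space.  A worked example with
hedged numbers (PERCPU_PAGE_SIZE is 64KiB on ia64 and
PERCPU_MODULE_RESERVE was 8KiB at the time; re-check both against the
tree) and, say, 20KiB of static percpu data:

	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size
		 = 65536 - 20480 - 8192
		 = 36864 bytes of dynamic space per unit

A negative dyn_size means the static area no longer fits in one
percpu page, which both new setup_per_cpu_areas() implementations
treat as fatal.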

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: linux-ia64 <linux-ia64@vger.kernel.org>
---
 arch/ia64/Kconfig        |    3 --
 arch/ia64/kernel/setup.c |   12 ------
 arch/ia64/mm/contig.c    |   58 ++++++++++++++++++++++++++++---
 arch/ia64/mm/discontig.c |   85 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 138 insertions(+), 20 deletions(-)

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 011a1cd..e624611 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -89,9 +89,6 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default y
 
-config HAVE_LEGACY_PER_CPU_AREA
-	def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 5d77c1e..bc1ef4a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -855,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
 }
 
 /*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
-	/* start_kernel() requires this... */
-}
-#endif
-
-/*
  * Do the following calculations:
  *
  * 1. the max. cache line size.
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 351da0a..54bf540 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,11 +163,11 @@ per_cpu_init (void)
 	first_time = false;
 
 	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
+	 * get_free_pages() cannot be used before cpu_init() done.
+	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+	 * to avoid that AP calls get_zeroed_page().
 	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_possible_cpu(cpu) {
 		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
 		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
@@ -196,9 +196,57 @@ skip:
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *gi;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int rc;
+
+	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	gi = &ai->groups[0];
+
+	/* units are assigned consecutively to possible cpus */
+	for_each_possible_cpu(cpu)
+		gi->cpu_map[gi->nr_units++] = cpu;
+
+	/* set parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size		= static_size;
+	ai->reserved_size	= reserved_size;
+	ai->dyn_size		= dyn_size;
+	ai->unit_size		= PERCPU_PAGE_SIZE;
+	ai->atom_size		= PAGE_SIZE;
+	ai->alloc_size		= PERCPU_PAGE_SIZE;
+
+	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 200282b..40e4c1f 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -172,6 +172,91 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *uninitialized_var(gi);
+	unsigned int *cpu_map;
+	void *base;
+	unsigned long base_offset;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int node, prev_node, unit, nr_units, rc;
+
+	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	cpu_map = ai->groups[0].cpu_map;
+
+	/* determine base */
+	base = (void *)ULONG_MAX;
+	for_each_possible_cpu(cpu)
+		base = min(base,
+			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+	base_offset = (void *)__per_cpu_start - base;
+
+	/* build cpu_map, units are grouped by node */
+	unit = 0;
+	for_each_node(node)
+		for_each_possible_cpu(cpu)
+			if (node == node_cpuid[cpu].nid)
+				cpu_map[unit++] = cpu;
+	nr_units = unit;
+
+	/* set basic parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size		= static_size;
+	ai->reserved_size	= reserved_size;
+	ai->dyn_size		= dyn_size;
+	ai->unit_size		= PERCPU_PAGE_SIZE;
+	ai->atom_size		= PAGE_SIZE;
+	ai->alloc_size		= PERCPU_PAGE_SIZE;
+
+	/*
+	 * CPUs are put into groups according to node.  Walk cpu_map
+	 * and create new groups at node boundaries.
+	 */
+	prev_node = -1;
+	ai->nr_groups = 0;
+	for (unit = 0; unit < nr_units; unit++) {
+		cpu = cpu_map[unit];
+		node = node_cpuid[cpu].nid;
+
+		if (node == prev_node) {
+			gi->nr_units++;
+			continue;
+		}
+		prev_node = node;
+
+		gi = &ai->groups[ai->nr_groups++];
+		gi->nr_units		= 1;
+		gi->base_offset		= __per_cpu_offset[cpu] + base_offset;
+		gi->cpu_map		= &cpu_map[unit];
+	}
+
+	rc = pcpu_setup_first_chunk(ai, base);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
-- 
1.6.4.2
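
With HAVE_LEGACY_PER_CPU_AREA gone, ia64 runtime percpu allocations
are served by the generic dynamic allocator, i.e. the standard API
just works.  A minimal sketch (struct snap and the function names are
hypothetical, for illustration only):

	#include <linux/percpu.h>

	struct snap {
		unsigned long	events;
	};

	static struct snap *snaps;

	static int __init snap_init(void)
	{
		/* zeroed, per-cpu replicated storage from the dynamic area */
		snaps = alloc_percpu(struct snap);
		if (!snaps)
			return -ENOMEM;
		return 0;
	}

	static void snap_event(void)
	{
		/* each cpu only ever touches its own copy */
		per_cpu_ptr(snaps, get_cpu())->events++;
		put_cpu();
	}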

