From: Tejun Heo <teheo@suse.de>
To: Nick Piggin <npiggin@suse.de>, Tony Luck <tony.luck@intel.com>,
	Fenghua Yu <fenghua.yu@intel.com>,
	linux-ia64 <linux-ia64@vger.kernel.org>,
	Ingo Molnar <mingo@redhat.com>,
	Rusty Russell <rusty@rustcorp.com.au>,
	Christoph Lameter <cl@linux-foundation.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH REPOST 3/5] ia64: allocate percpu area for cpu0 like percpu areas for other cpus
Date: Wed, 30 Sep 2009 10:24:14 +0900	[thread overview]
Message-ID: <4AC2B33E.6000902@suse.de> (raw)
In-Reply-To: <1253682382-24740-1-git-send-email-tj@kernel.org>

cpu0 used a special percpu area reserved by the linker, __cpu0_per_cpu,
which is set up early in boot by head.S.  However, this doesn't
guarantee that the area will be on the same node as cpu0, and the
percpu area for cpu0 ends up very far away from the percpu areas for
the other cpus, which causes problems for the congruent percpu
allocator.

This patch makes percpu area initialization allocate the percpu area
for cpu0 just like for any other cpu and copy it from __cpu0_per_cpu,
which now resides in the __init area.  This means that for cpu0 the
percpu area is first set up at __cpu0_per_cpu early by head.S and then
moved to an area in the linear mapping during memory initialization,
so it's not allowed to take a pointer to a percpu variable between
head.S and memory initialization.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: linux-ia64 <linux-ia64@vger.kernel.org>
---
Tony Luck didn't receive this one and couldn't find it in the archives
either.  Repost.  Unchanged from the original posting.
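
As background for the review, below is a minimal userspace sketch of
the offset-based percpu addressing this patch depends on and of the
cpu0 relocation it adds.  The names (percpu_template, cpu0_boot_area,
cpu0_var, ...) are made up for illustration and only echo the kernel
symbols; this is not kernel code.  It shows why a pointer to a percpu
variable taken before memory initialization ends up referring to the
stale __init copy once cpu0's area is moved.

/*
 * Minimal userspace model of the offset-based percpu scheme.
 * Illustrative only: names echo the kernel's, and the pointer
 * arithmetic between separate objects mirrors the kernel's own
 * trick rather than strictly conforming C.
 */
#include <stdio.h>
#include <string.h>

#define AREA_WORDS 8

/* stand-in for the linker image __per_cpu_start..__per_cpu_end */
static long percpu_template[AREA_WORDS];

/* stand-in for __cpu0_per_cpu (early boot) and cpu0's final area */
static long cpu0_boot_area[AREA_WORDS];
static long cpu0_final_area[AREA_WORDS];

/* stand-in for __per_cpu_offset[0] */
static long cpu0_offset;

/* a "percpu variable": slot 0 of the image, reached via the offset */
static long *cpu0_var(void)
{
	return (long *)((char *)percpu_template + cpu0_offset);
}

int main(void)
{
	/* head.S equivalent: cpu0 initially runs on the boot-time area */
	memcpy(cpu0_boot_area, percpu_template, sizeof(percpu_template));
	cpu0_offset = (char *)cpu0_boot_area - (char *)percpu_template;
	*cpu0_var() = 42;

	/* a pointer captured now refers to the boot-time (__init) copy */
	long *stale = cpu0_var();

	/*
	 * per_cpu_init() equivalent: copy cpu0's data to its final home
	 * and switch the offset (the kernel also reloads ar.k3 here).
	 */
	memcpy(cpu0_final_area, cpu0_boot_area, sizeof(cpu0_final_area));
	cpu0_offset = (char *)cpu0_final_area - (char *)percpu_template;

	*cpu0_var() = 43;
	printf("live copy: %ld, stale pointer: %ld\n", *cpu0_var(), *stale);
	return 0;
}

Compiled and run, this prints "live copy: 43, stale pointer: 42",
which is exactly the hazard the last paragraph of the changelog
warns about.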

 arch/ia64/kernel/vmlinux.lds.S |   11 +++++----
 arch/ia64/mm/contig.c          |   47 +++++++++++++++++++++++++--------------
 arch/ia64/mm/discontig.c       |   35 ++++++++++++++++++++---------
 3 files changed, 60 insertions(+), 33 deletions(-)

diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b..1295ba3 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
 	}
 #endif

+#ifdef	CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;

@@ -198,11 +204,6 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
 	{
-#ifdef	CONFIG_SMP
-  . = ALIGN(PERCPU_PAGE_SIZE);
-		__cpu0_per_cpu = .;
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
 		INIT_TASK_DATA(PAGE_SIZE)
 		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1341437..351da0a 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,36 +154,49 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-	int cpu;
-	static int first_time=1;
+	static bool first_time = true;
+	void *cpu0_data = __cpu0_per_cpu;
+	unsigned int cpu;
+
+	if (!first_time)
+		goto skip;
+	first_time = false;

 	/*
 	 * get_free_pages() cannot be used before cpu_init() done.  BSP
 	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
 	 * get_zeroed_page().
 	 */
-	if (first_time) {
-		void *cpu0_data = __cpu0_per_cpu;
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

-		first_time=0;
+		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

-		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is set up by head.S and used until this point.
+		 * Update ar.k3.  This move ensures that the percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from the other
+		 * percpu areas, which is important for the congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+				    (unsigned long)__per_cpu_start);

-		for (cpu = 1; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
+skip:
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }

 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
 #else
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 9f24b3c..200282b 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,17 +143,30 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	int cpu;

 	for_each_possible_early_cpu(cpu) {
-		if (cpu == 0) {
-			void *cpu0_data = __cpu0_per_cpu;
-			__per_cpu_offset[cpu] = (char*)cpu0_data -
-				__per_cpu_start;
-		} else if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
+		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+		if (node != node_cpuid[cpu].nid)
+			continue;
+
+		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+			__per_cpu_start;
+
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is set up by head.S and used until this point.
+		 * Update ar.k3.  This move ensures that the percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from the other
+		 * percpu areas, which is important for the congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA,
+				    (unsigned long)cpu_data -
+				    (unsigned long)__per_cpu_start);
+
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
 #endif
 	return cpu_data;
-- 
1.6.4.2


Thread overview: 72+ messages
2009-09-23  5:06 [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one, take#2 Tejun Heo
2009-09-23  5:06 ` Tejun Heo
2009-09-23  5:06 ` [PATCH 1/5] ia64: don't alias VMALLOC_END to vmalloc_end Tejun Heo
2009-09-23  5:06   ` Tejun Heo
2009-09-23  5:06 ` [PATCH 2/5] ia64: initialize cpu maps early Tejun Heo
2009-09-23  5:06   ` Tejun Heo
2009-09-23  5:06 ` [PATCH 3/5] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Tejun Heo
2009-09-23  5:06   ` Tejun Heo
2009-09-23  5:06 ` [PATCH 4/5] ia64: convert to dynamic percpu allocator Tejun Heo
2009-09-23  5:06   ` Tejun Heo
2009-09-23  5:06 ` [PATCH 5/5] percpu: kill legacy " Tejun Heo
2009-09-23  5:06   ` Tejun Heo
2009-09-23 11:10   ` Rusty Russell
2009-09-23 11:22     ` Rusty Russell
2009-09-29  0:25 ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one, take#2 Tejun Heo
2009-09-29  0:25   ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu Tejun Heo
2009-09-30 20:32   ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one, take#2 Luck, Tony
2009-09-30 20:32     ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic Luck, Tony
2009-09-30 20:47     ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one, take#2 Christoph Lameter
2009-09-30 20:47       ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu Christoph Lameter
2009-09-30 22:05       ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one, take#2 Luck, Tony
2009-09-30 22:05         ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic Luck, Tony
2009-09-30 23:06         ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one, take#2 Peter Chubb
2009-09-30 23:06           ` Peter Chubb
2009-09-30 23:49           ` Christoph Lameter
2009-09-30 23:49             ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu Christoph Lameter
2009-09-30  1:24 ` Tejun Heo [this message]
2009-09-30  1:24   ` [PATCH REPOST 3/5] ia64: allocate percpu area for cpu0 like percpu Tejun Heo
2009-10-02  5:11 ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one, take#2 Tejun Heo
2009-10-02  5:11   ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu Tejun Heo
  -- strict thread matches above, loose matches on Subject: below --
2009-09-22  7:40 [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one Tejun Heo
2009-09-22  7:40 ` Tejun Heo
2009-09-22  7:40 ` [PATCH 1/4] vmalloc: rename local variables vmalloc_start and vmalloc_end Tejun Heo
2009-09-22  7:40   ` Tejun Heo
2009-09-22 22:52   ` Christoph Lameter
2009-09-22 22:52     ` [PATCH 1/4] vmalloc: rename local variables vmalloc_start and Christoph Lameter
2009-09-23  2:08     ` [PATCH 1/4] vmalloc: rename local variables vmalloc_start and vmalloc_end Tejun Heo
2009-09-23  2:08       ` [PATCH 1/4] vmalloc: rename local variables vmalloc_start and Tejun Heo
2009-09-22  7:40 ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Tejun Heo
2009-09-22  7:40   ` Tejun Heo
2009-09-22 22:59   ` Christoph Lameter
2009-09-22 22:59     ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu Christoph Lameter
2009-09-23  2:11     ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Tejun Heo
2009-09-23  2:11       ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas Tejun Heo
2009-09-23 13:44       ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Christoph Lameter
2009-09-23 13:44         ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu Christoph Lameter
2009-09-23 14:01         ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Tejun Heo
2009-09-23 14:01           ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas Tejun Heo
2009-09-23 17:17           ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Christoph Lameter
2009-09-23 17:17             ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu Christoph Lameter
2009-09-23 22:03             ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Tejun Heo
2009-09-23 22:03               ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas Tejun Heo
2009-09-24  7:36               ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Christoph Lameter
2009-09-24  7:36                 ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu Christoph Lameter
2009-09-24  8:37                 ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Tejun Heo
2009-09-24  8:37                   ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas Tejun Heo
2009-09-28 15:12                   ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu areas for other cpus Christoph Lameter
2009-09-28 15:12                     ` [PATCH 2/4] ia64: allocate percpu area for cpu0 like percpu Christoph Lameter
2009-09-22  7:40 ` [PATCH 3/4] ia64: convert to dynamic percpu allocator Tejun Heo
2009-09-22  7:40   ` Tejun Heo
2009-09-22  7:40 ` [PATCH 4/4] percpu: kill legacy " Tejun Heo
2009-09-22  7:40   ` Tejun Heo
2009-09-22  8:16 ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one Ingo Molnar
2009-09-22  8:16   ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic Ingo Molnar
2009-09-22 20:49   ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one Luck, Tony
2009-09-22 20:49     ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic Luck, Tony
2009-09-22 21:10     ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one Luck, Tony
2009-09-22 21:10       ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic Luck, Tony
2009-09-22 21:24       ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one Luck, Tony
2009-09-22 21:24         ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic Luck, Tony
2009-09-22 21:50         ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu and drop the old one Tejun Heo
2009-09-22 21:50           ` [PATCHSET percpu#for-next] percpu: convert ia64 to dynamic percpu Tejun Heo

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=4AC2B33E.6000902@suse.de \
    --to=teheo@suse.de \
    --cc=cl@linux-foundation.org \
    --cc=fenghua.yu@intel.com \
    --cc=linux-ia64@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=npiggin@suse.de \
    --cc=rusty@rustcorp.com.au \
    --cc=tony.luck@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.