* [PATCH 0/9 - v2] Integrate system.h
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa

Hi,

At Ingo's request, here goes a new patchset that actually
applies on top of the x86 tree (mm branch). Besides that,
I've also included a patch that removes the cr8 references, as Andi
suggested.




* [PATCH 1/9] remove volatile keyword from clflush.
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

The p parameter is an explicit memory reference, and is
enough to prevent gcc from misbehaving here. The volatile
keyword seems completely unnecessary.
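
(Illustration only, not part of the patch: a hypothetical caller
showing the usual pattern for flushing a whole buffer with clflush();
boot_cpu_data.x86_clflush_size is the cache line size the CPU reports.)

	/* Flush a buffer one cache line at a time. clflush is only
	 * ordered against other accesses by a fence, hence the final
	 * mb(). */
	static void clflush_range_sketch(void *vaddr, int size)
	{
		int i;

		for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
			clflush((char *)vaddr + i);
		mb();
	}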

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system_32.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index ef84688..27e106d 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -161,7 +161,7 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-static inline void clflush(volatile void *__p)
+static inline void clflush(void *__p)
 {
 	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
 }
-- 
1.4.4.2



* [PATCH 2/9] put together equal pieces of system.h
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch puts together the pieces of system_{32,64}.h that
look the same. It's the first step towards the integration
of this file.
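
(Illustration only: one of the moved pieces, savesegment(), is used
like this in the 32-bit context-switch path; the function and variable
names here are made up.)

	static unsigned int current_gs_sketch(void)
	{
		unsigned int sel;

		savesegment(gs, sel);	/* stores the %gs selector */
		return sel;
	}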

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 arch/x86/kernel/process_64.c |    2 +-
 include/asm-x86/system.h     |   70 ++++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/system_32.h  |   59 -----------------------------------
 include/asm-x86/system_64.h  |   12 -------
 4 files changed, 71 insertions(+), 72 deletions(-)

Index: linux-2.6-x86/arch/x86/kernel/process_64.c
===================================================================
--- linux-2.6-x86.orig/arch/x86/kernel/process_64.c
+++ linux-2.6-x86/arch/x86/kernel/process_64.c
@@ -99,7 +99,7 @@ void exit_idle(void)
  * We use this if we don't have any better
  * idle routine..
  */
-static void default_idle(void)
+void default_idle(void)
 {
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
Index: linux-2.6-x86/include/asm-x86/system.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system.h
+++ linux-2.6-x86/include/asm-x86/system.h
@@ -1,5 +1,74 @@
+#ifndef _ASM_X86_SYSTEM_H_
+#define _ASM_X86_SYSTEM_H_
+
+#include <asm/asm.h>
+
 #ifdef CONFIG_X86_32
 # include "system_32.h"
 #else
 # include "system_64.h"
 #endif
+
+#ifdef __KERNEL__
+#define _set_base(addr, base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %%dl,%2\n\t" \
+	"movb %%dh,%3" \
+	:"=&d" (__pr) \
+	:"m" (*((addr)+2)), \
+	 "m" (*((addr)+4)), \
+	 "m" (*((addr)+7)), \
+	 "0" (base) \
+	); } while (0)
+
+#define _set_limit(addr, limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %2,%%dh\n\t" \
+	"andb $0xf0,%%dh\n\t" \
+	"orb %%dh,%%dl\n\t" \
+	"movb %%dl,%2" \
+	:"=&d" (__lr) \
+	:"m" (*(addr)), \
+	 "m" (*((addr)+6)), \
+	 "0" (limit) \
+	); } while (0)
+
+#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
+#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+	asm volatile("mov %%" #seg ",%0":"=rm" (value))
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	__asm__("lsll %1,%0"
+		:"=r" (__limit):"r" (segment));
+	return __limit+1;
+}
+#endif /* __KERNEL__ */
+
+static inline void clflush(void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern int es7000_plat;
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+
+#endif
Index: linux-2.6-x86/include/asm-x86/system_32.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system_32.h
+++ linux-2.6-x86/include/asm-x86/system_32.h
@@ -34,34 +34,6 @@ extern struct task_struct * FASTCALL(__s
 		      "2" (prev), "d" (next));				\
 } while (0)
 
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %%dl,%2\n\t" \
-	"movb %%dh,%3" \
-	:"=&d" (__pr) \
-	:"m" (*((addr)+2)), \
-	 "m" (*((addr)+4)), \
-	 "m" (*((addr)+7)), \
-         "0" (base) \
-        ); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %2,%%dh\n\t" \
-	"andb $0xf0,%%dh\n\t" \
-	"orb %%dh,%%dl\n\t" \
-	"movb %%dl,%2" \
-	:"=&d" (__lr) \
-	:"m" (*(addr)), \
-	 "m" (*((addr)+6)), \
-	 "0" (limit) \
-        ); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
-
 /*
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..
@@ -83,12 +55,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t"
 		".previous"			\
 		: :"rm" (value))
 
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value) \
-	asm volatile("mov %%" #seg ",%0":"=rm" (value))
-
 
 static inline void native_clts(void)
 {
@@ -161,11 +127,6 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-static inline void clflush(void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -190,15 +151,6 @@ static inline void clflush(void *__p)
 
 #endif	/* __KERNEL__ */
 
-static inline unsigned long get_limit(unsigned long segment)
-{
-	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
 
 /*
  * Force strict CPU ordering.
@@ -305,15 +257,5 @@ static inline unsigned long get_limit(un
  * disable hlt during certain critical i/o operations
  */
 #define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern int es7000_plat;
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-void default_idle(void);
 
 #endif
Index: linux-2.6-x86/include/asm-x86/system_64.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system_64.h
+++ linux-2.6-x86/include/asm-x86/system_64.h
@@ -134,13 +134,6 @@ static inline void write_cr8(unsigned lo
 
 #endif	/* __KERNEL__ */
 
-static inline void clflush(volatile void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	barrier()
@@ -170,9 +163,4 @@ static inline void clflush(volatile void
 
 #include <linux/irqflags.h>
 
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
 #endif


* [PATCH 3/9] unify load_segment macro
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch unifies the loadsegment() macro, making it equal in both
the x86_64 and i386 architectures. The common version goes to system.h,
and the old ones are deleted.
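
(For reference: the common version relies on the width-neutral
_ASM_ALIGN and _ASM_PTR macros from <asm/asm.h>; paraphrased, they
expand roughly as follows.)

	#ifdef CONFIG_X86_32
	# define _ASM_ALIGN	" .balign 4 "
	# define _ASM_PTR	" .long "
	#else
	# define _ASM_ALIGN	" .balign 8 "
	# define _ASM_PTR	" .quad "
	#endif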

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |   21 +++++++++++++++++++++
 include/asm-x86/system_32.h |   22 ----------------------
 include/asm-x86/system_64.h |   20 --------------------
 3 files changed, 21 insertions(+), 42 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 6e9491d..1ac6088 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -39,6 +39,27 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
 
 /*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg, value)			\
+	asm volatile("\n"			\
+		"1:\t"				\
+		"movl %k0,%%" #seg "\n"		\
+		"2:\n"				\
+		".section .fixup,\"ax\"\n"	\
+		"3:\t"				\
+		"movl %k1, %%" #seg "\n\t"	\
+		"jmp 2b\n"			\
+		".previous\n"			\
+		".section __ex_table,\"a\"\n\t"	\
+		_ASM_ALIGN "\n\t"		\
+		_ASM_PTR " 1b,3b\n"		\
+		".previous"			\
+		: :"r" (value), "r" (0))
+
+
+/*
  * Save a segment register away
  */
 #define savesegment(seg, value) \
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 717aeb9..a0641a3 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -34,28 +34,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		      "2" (prev), "d" (next));				\
 } while (0)
 
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value)			\
-	asm volatile("\n"			\
-		"1:\t"				\
-		"mov %0,%%" #seg "\n"		\
-		"2:\n"				\
-		".section .fixup,\"ax\"\n"	\
-		"3:\t"				\
-		"pushl $0\n\t"			\
-		"popl %%" #seg "\n\t"		\
-		"jmp 2b\n"			\
-		".previous\n"			\
-		".section __ex_table,\"a\"\n\t"	\
-		".align 4\n\t"			\
-		".long 1b,3b\n"			\
-		".previous"			\
-		: :"rm" (value))
-
-
 static inline void native_clts(void)
 {
 	asm volatile ("clts");
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index f340060..da46059 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -43,26 +43,6 @@
 extern void load_gs_index(unsigned); 
 
 /*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value)	\
-	asm volatile("\n"			\
-		"1:\t"				\
-		"movl %k0,%%" #seg "\n"		\
-		"2:\n"				\
-		".section .fixup,\"ax\"\n"	\
-		"3:\t"				\
-		"movl %1,%%" #seg "\n\t" 	\
-		"jmp 2b\n"			\
-		".previous\n"			\
-		".section __ex_table,\"a\"\n\t"	\
-		".align 8\n\t"			\
-		".quad 1b,3b\n"			\
-		".previous"			\
-		: :"r" (value), "r" (0))
-
-/*
  * Clear and set 'TS' bit respectively
  */
 #define clts() __asm__ __volatile__ ("clts")
-- 
1.4.4.2



* [PATCH 4/9] remove references to cr8 register
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

As pointed out by Andi, Linux never really uses this register,
so saving and restoring it is not really necessary. This patch
removes all references to it.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 arch/x86/kernel/asm-offsets_64.c |    1 -
 arch/x86/kernel/suspend_64.c     |    2 --
 include/asm-x86/suspend_64.h     |    2 +-
 include/asm-x86/system_64.h      |   12 ------------
 4 files changed, 1 insertions(+), 16 deletions(-)

Index: linux-2.6-x86/arch/x86/kernel/asm-offsets_64.c
===================================================================
--- linux-2.6-x86.orig/arch/x86/kernel/asm-offsets_64.c
+++ linux-2.6-x86/arch/x86/kernel/asm-offsets_64.c
@@ -107,7 +107,6 @@ int main(void)
 	ENTRY(cr2);
 	ENTRY(cr3);
 	ENTRY(cr4);
-	ENTRY(cr8);
 	BLANK();
 #undef ENTRY
 	DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
Index: linux-2.6-x86/arch/x86/kernel/suspend_64.c
===================================================================
--- linux-2.6-x86.orig/arch/x86/kernel/suspend_64.c
+++ linux-2.6-x86/arch/x86/kernel/suspend_64.c
@@ -53,7 +53,6 @@ void __save_processor_state(struct saved
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = read_cr3();
 	ctxt->cr4 = read_cr4();
-	ctxt->cr8 = read_cr8();
 }
 
 void save_processor_state(void)
@@ -75,7 +74,6 @@ void __restore_processor_state(struct sa
 	 * control registers
 	 */
 	wrmsrl(MSR_EFER, ctxt->efer);
-	write_cr8(ctxt->cr8);
 	write_cr4(ctxt->cr4);
 	write_cr3(ctxt->cr3);
 	write_cr2(ctxt->cr2);
Index: linux-2.6-x86/include/asm-x86/suspend_64.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/suspend_64.h
+++ linux-2.6-x86/include/asm-x86/suspend_64.h
@@ -20,7 +20,7 @@ struct saved_context {
 	struct pt_regs regs;
   	u16 ds, es, fs, gs, ss;
 	unsigned long gs_base, gs_kernel_base, fs_base;
-	unsigned long cr0, cr2, cr3, cr4, cr8;
+	unsigned long cr0, cr2, cr3, cr4;
 	unsigned long efer;
 	u16 gdt_pad;
 	u16 gdt_limit;
Index: linux-2.6-x86/include/asm-x86/system_64.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system_64.h
+++ linux-2.6-x86/include/asm-x86/system_64.h
@@ -95,18 +95,6 @@ static inline void write_cr4(unsigned lo
 	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
 }
 
-static inline unsigned long read_cr8(void)
-{
-	unsigned long cr8;
-	asm volatile("movq %%cr8,%0" : "=r" (cr8));
-	return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
 #define stts() write_cr0(8 | read_cr0())
 
 #define wbinvd() \


* [PATCH 5/9] unify paravirt parts of system.h
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch moves the i386 control register manipulation functions,
wbinvd(), and clts() to system.h. They are essentially the same
as in x86_64.

With this, the paravirt parts of system.h come for free in x86_64.
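
(A minimal standalone sketch of the __force_order idiom the moved
functions use, extracted from the hunk below: a dummy variable named in
the asm constraints gives gcc an ordering dependency between the
accessors without a full memory clobber.)

	static unsigned long __force_order;

	static inline unsigned long read_cr3_sketch(void)
	{
		unsigned long val;

		/* "=m" (__force_order) makes gcc order this asm against
		 * the other accessors that also name the variable. */
		asm volatile("mov %%cr3,%0" : "=r" (val), "=m" (__force_order));
		return val;
	}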

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |  110 +++++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/system_32.h |   94 ------------------------------------
 include/asm-x86/system_64.h |   61 ------------------------
 3 files changed, 110 insertions(+), 155 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 1ac6088..518b7bd 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -3,6 +3,8 @@
 
 #include <asm/asm.h>
 
+#include <linux/kernel.h>
+
 #ifdef CONFIG_X86_32
 # include "system_32.h"
 #else
@@ -38,6 +40,8 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
 #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
 
+extern void load_gs_index(unsigned);
+
 /*
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..
@@ -72,6 +76,112 @@ static inline unsigned long get_limit(unsigned long segment)
 		:"=r" (__limit):"r" (segment));
 	return __limit+1;
 }
+
+static inline void native_clts(void)
+{
+	asm volatile ("clts");
+}
+
+/*
+ * Volatile isn't enough to prevent the compiler from reordering the
+ * read/write functions for the control registers and messing everything up.
+ * A memory clobber would solve the problem, but would prevent reordering of
+ * all loads stores around it, which can hurt performance. Solution is to
+ * use a variable and mimic reads and writes to it to enforce serialization
+ */
+static unsigned long __force_order;
+
+static inline unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist. In x86_64, a cr4 always
+	 * exists, so it will never fail. */
+#ifdef CONFIG_X86_32
+	asm volatile("1: mov %%cr4, %0		\n"
+		"2:				\n"
+		".section __ex_table,\"a\"	\n"
+		".long 1b,2b			\n"
+		".previous			\n"
+		: "=r" (val), "=m" (__force_order) : "0" (0));
+#else
+	val = native_read_cr4();
+#endif
+	return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
+}
+
+static inline void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define read_cr0()	(native_read_cr0())
+#define write_cr0(x)	(native_write_cr0(x))
+#define read_cr2()	(native_read_cr2())
+#define write_cr2(x)	(native_write_cr2(x))
+#define read_cr3()	(native_read_cr3())
+#define write_cr3(x)	(native_write_cr3(x))
+#define read_cr4()	(native_read_cr4())
+#define read_cr4_safe()	(native_read_cr4_safe())
+#define write_cr4(x)	(native_write_cr4(x))
+#define wbinvd()	(native_wbinvd())
+
+/* Clear the 'TS' bit */
+#define clts()		(native_clts())
+
+#endif/* CONFIG_PARAVIRT */
+
+#define stts() write_cr0(8 | read_cr0())
+
 #endif /* __KERNEL__ */
 
 static inline void clflush(void *__p)
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index a0641a3..6c69567 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
-#include <linux/kernel.h>
 #include <asm/segment.h>
 #include <asm/cpufeature.h>
 #include <asm/cmpxchg.h>
@@ -34,99 +33,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		      "2" (prev), "d" (next));				\
 } while (0)
 
-static inline void native_clts(void)
-{
-	asm volatile ("clts");
-}
-
-static inline unsigned long native_read_cr0(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static inline void native_write_cr0(unsigned long val)
-{
-	asm volatile("movl %0,%%cr0": :"r" (val));
-}
-
-static inline unsigned long native_read_cr2(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static inline void native_write_cr2(unsigned long val)
-{
-	asm volatile("movl %0,%%cr2": :"r" (val));
-}
-
-static inline unsigned long native_read_cr3(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static inline void native_write_cr3(unsigned long val)
-{
-	asm volatile("movl %0,%%cr3": :"r" (val));
-}
-
-static inline unsigned long native_read_cr4(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static inline unsigned long native_read_cr4_safe(void)
-{
-	unsigned long val;
-	/* This could fault if %cr4 does not exist */
-	asm volatile("1: movl %%cr4, %0		\n"
-		"2:				\n"
-		".section __ex_table,\"a\"	\n"
-		".long 1b,2b			\n"
-		".previous			\n"
-		: "=r" (val): "0" (0));
-	return val;
-}
-
-static inline void native_write_cr4(unsigned long val)
-{
-	asm volatile("movl %0,%%cr4": :"r" (val));
-}
-
-static inline void native_wbinvd(void)
-{
-	asm volatile("wbinvd": : :"memory");
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define read_cr0()	(native_read_cr0())
-#define write_cr0(x)	(native_write_cr0(x))
-#define read_cr2()	(native_read_cr2())
-#define write_cr2(x)	(native_write_cr2(x))
-#define read_cr3()	(native_read_cr3())
-#define write_cr3(x)	(native_write_cr3(x))
-#define read_cr4()	(native_read_cr4())
-#define read_cr4_safe()	(native_read_cr4_safe())
-#define write_cr4(x)	(native_write_cr4(x))
-#define wbinvd()	(native_wbinvd())
-
-/* Clear the 'TS' bit */
-#define clts()		(native_clts())
-
-#endif/* CONFIG_PARAVIRT */
-
-/* Set the 'TS' bit */
-#define stts() write_cr0(8 | read_cr0())
-
 #endif	/* __KERNEL__ */
 
 
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index d7f0776..2757258 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
-#include <linux/kernel.h>
 #include <asm/segment.h>
 #include <asm/cmpxchg.h>
 
@@ -40,66 +39,6 @@
 		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
 		     : "memory", "cc" __EXTRA_CLOBBER)
     
-extern void load_gs_index(unsigned); 
-
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
-
-static inline unsigned long read_cr0(void)
-{ 
-	unsigned long cr0;
-	asm volatile("movq %%cr0,%0" : "=r" (cr0));
-	return cr0;
-}
-
-static inline void write_cr0(unsigned long val) 
-{ 
-	asm volatile("movq %0,%%cr0" :: "r" (val));
-}
-
-static inline unsigned long read_cr2(void)
-{
-	unsigned long cr2;
-	asm volatile("movq %%cr2,%0" : "=r" (cr2));
-	return cr2;
-}
-
-static inline void write_cr2(unsigned long val)
-{
-	asm volatile("movq %0,%%cr2" :: "r" (val));
-}
-
-static inline unsigned long read_cr3(void)
-{ 
-	unsigned long cr3;
-	asm volatile("movq %%cr3,%0" : "=r" (cr3));
-	return cr3;
-}
-
-static inline void write_cr3(unsigned long val)
-{
-	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
-}
-
-static inline unsigned long read_cr4(void)
-{ 
-	unsigned long cr4;
-	asm volatile("movq %%cr4,%0" : "=r" (cr4));
-	return cr4;
-}
-
-static inline void write_cr4(unsigned long val)
-{ 
-	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
-}
-
-#define stts() write_cr0(8 | read_cr0())
-
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory")
-
 #endif	/* __KERNEL__ */
 
 #ifdef CONFIG_SMP
-- 
1.4.4.2



* [PATCH 6/9] remove unused macro
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

Mr. Grep says warn_if_not_ulong() is no longer used anywhere
in the code. So, we remove it.
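
(For the record, the removed macro was a compile-time type check:
comparing the addresses of two objects only passes gcc silently when
their types match. A sketch of how it fired:)

	static void type_check_sketch(void)
	{
		unsigned long ok;
		unsigned int bad;

		warn_if_not_ulong(ok);	/* silent */
		warn_if_not_ulong(bad);	/* warning: comparison of distinct
					 * pointer types lacks a cast */
	}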

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system_64.h |    2 --
 1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 2757258..621e0b8 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -66,8 +66,6 @@
 #define read_barrier_depends()	do {} while(0)
 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 
-#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-
 #include <linux/irqflags.h>
 
 #endif
-- 
1.4.4.2



* [PATCH 7/9] unify smp parts of system.h
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

The memory barrier parts of system.h are not very different between
i386 and x86_64, the main difference being the availability of
instructions, which we handle with ifdefs.

They are consolidated in the system.h file and then removed from
the arch-specific headers.
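
(A hypothetical producer/consumer pair showing where the unified
smp_*() barriers matter; the names are illustrative.)

	int data, flag;

	void producer(void)
	{
		data = 42;
		smp_wmb();	/* order the data store before the flag store */
		flag = 1;
	}

	void consumer(void)
	{
		while (!flag)
			cpu_relax();
		smp_rmb();	/* order the flag read before the data read */
		BUG_ON(data != 42);
	}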

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |  105 +++++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/system_32.h |   99 ----------------------------------------
 include/asm-x86/system_64.h |   25 ----------
 3 files changed, 105 insertions(+), 124 deletions(-)

Index: linux-2.6-x86/include/asm-x86/system.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system.h
+++ linux-2.6-x86/include/asm-x86/system.h
@@ -202,4 +202,109 @@ extern void free_init_pages(char *what, 
 
 void default_idle(void);
 
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+/*
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb() 	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb()	rmb()
+#else
+# define smp_rmb()	barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb() 	wmb()
+#else
+# define smp_wmb()	barrier()
+#endif
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+
 #endif
Index: linux-2.6-x86/include/asm-x86/system_32.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system_32.h
+++ linux-2.6-x86/include/asm-x86/system_32.h
@@ -36,105 +36,6 @@ extern struct task_struct * FASTCALL(__s
 #endif	/* __KERNEL__ */
 
 
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
- 
-
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb()	rmb()
-#else
-# define smp_rmb()	barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() 	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
 #include <linux/irqflags.h>
 
 /*
Index: linux-2.6-x86/include/asm-x86/system_64.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system_64.h
+++ linux-2.6-x86/include/asm-x86/system_64.h
@@ -41,31 +41,6 @@
     
 #endif	/* __KERNEL__ */
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do {} while(0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do {} while(0)
-#endif
-
-    
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#define mb() 	asm volatile("mfence":::"memory")
-#define rmb()	asm volatile("lfence":::"memory")
-#define wmb()	asm volatile("sfence" ::: "memory")
-
-#define read_barrier_depends()	do {} while(0)
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-
 #include <linux/irqflags.h>
 
 #endif


* [PATCH 8/9] move switch_to macro to system.h
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch moves the switch_to() macro to system.h.

As the macro is fundamentally different between i386 and x86_64,
each version is enclosed in an ifdef.
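
(For context, roughly how the scheduler invokes it, paraphrasing
context_switch() in kernel/sched.c of this era; "last" receives the
task we actually switched away from, which on SMP may differ from
prev by the time this CPU runs the task again.)

	/* inside context_switch(): */
	switch_to(prev, next, prev);
	barrier();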

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |   61 +++++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/system_32.h |   31 ----------------------
 include/asm-x86/system_64.h |   36 -------------------------
 3 files changed, 61 insertions(+), 67 deletions(-)

Index: linux-2.6-x86/include/asm-x86/system.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system.h
+++ linux-2.6-x86/include/asm-x86/system.h
@@ -6,8 +6,69 @@
 #include <linux/kernel.h>
 
 #ifdef CONFIG_X86_32
+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
+
+struct task_struct; /* one of the stranger aspects of C forward declarations */
+extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
+						struct task_struct *next));
+
+/*
+ * Saving eflags is important. It switches not only IOPL between tasks,
+ * it also protects other tasks from NT leaking through sysenter etc.
+ */
+#define switch_to(prev, next, last) do {				\
+	unsigned long esi, edi;						\
+	asm volatile("pushfl\n\t"		/* Save flags */	\
+		     "pushl %%ebp\n\t"					\
+		     "movl %%esp,%0\n\t"	/* save ESP */		\
+		     "movl %5,%%esp\n\t"	/* restore ESP */	\
+		     "movl $1f,%1\n\t"		/* save EIP */		\
+		     "pushl %6\n\t"		/* restore EIP */	\
+		     "jmp __switch_to\n"				\
+		     "1:\t"						\
+		     "popl %%ebp\n\t"					\
+		     "popfl"						\
+		     :"=m" (prev->thread.sp), "=m" (prev->thread.ip),	\
+		      "=a" (last), "=S" (esi), "=D" (edi)		\
+		     :"m" (next->thread.sp), "m" (next->thread.ip),	\
+		      "2" (prev), "d" (next));				\
+} while (0)
+
 # include "system_32.h"
 #else
+#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
+#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+
+#define __EXTRA_CLOBBER  \
+	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+	  "r12", "r13", "r14", "r15"
+
+/* Save restore flags to clear handle leaking NT */
+#define switch_to(prev, next, last) \
+	asm volatile(SAVE_CONTEXT					  \
+	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
+	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
+	     "call __switch_to\n\t"					  \
+	     ".globl thread_return\n"					  \
+	     "thread_return:\n\t"					  \
+	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
+	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
+	     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
+	     "movq %%rax,%%rdi\n\t" 					  \
+	     "jc   ret_from_fork\n\t"					  \
+	     RESTORE_CONTEXT						  \
+	     : "=a" (last)					  	  \
+	     : [next] "S" (next), [prev] "D" (prev),			  \
+	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
+	       [tif_fork] "i" (TIF_FORK),			  	  \
+	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
+	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
+	     : "memory", "cc" __EXTRA_CLOBBER)
 # include "system_64.h"
 #endif
 
Index: linux-2.6-x86/include/asm-x86/system_32.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system_32.h
+++ linux-2.6-x86/include/asm-x86/system_32.h
@@ -5,37 +5,6 @@
 #include <asm/cpufeature.h>
 #include <asm/cmpxchg.h>
 
-#ifdef __KERNEL__
-#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
-
-struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
-extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-
-/*
- * Saving eflags is important. It switches not only IOPL between tasks,
- * it also protects other tasks from NT leaking through sysenter etc.
- */
-#define switch_to(prev,next,last) do {					\
-	unsigned long esi,edi;						\
-	asm volatile("pushfl\n\t"		/* Save flags */	\
-		     "pushl %%ebp\n\t"					\
-		     "movl %%esp,%0\n\t"	/* save ESP */		\
-		     "movl %5,%%esp\n\t"	/* restore ESP */	\
-		     "movl $1f,%1\n\t"		/* save EIP */		\
-		     "pushl %6\n\t"		/* restore EIP */	\
-		     "jmp __switch_to\n"				\
-		     "1:\t"						\
-		     "popl %%ebp\n\t"					\
-		     "popfl"						\
-		     :"=m" (prev->thread.sp),"=m" (prev->thread.ip),	\
-		      "=a" (last),"=S" (esi),"=D" (edi)			\
-		     :"m" (next->thread.sp),"m" (next->thread.ip),	\
-		      "2" (prev), "d" (next));				\
-} while (0)
-
-#endif	/* __KERNEL__ */
-
-
 #include <linux/irqflags.h>
 
 /*
Index: linux-2.6-x86/include/asm-x86/system_64.h
===================================================================
--- linux-2.6-x86.orig/include/asm-x86/system_64.h
+++ linux-2.6-x86/include/asm-x86/system_64.h
@@ -4,42 +4,6 @@
 #include <asm/segment.h>
 #include <asm/cmpxchg.h>
 
-#ifdef __KERNEL__
-
-#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
-
-/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
-
-#define __EXTRA_CLOBBER  \
-	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
-
-/* Save restore flags to clear handle leaking NT */
-#define switch_to(prev,next,last) \
-	asm volatile(SAVE_CONTEXT						    \
-		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
-		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
-		     "call __switch_to\n\t"					  \
-		     ".globl thread_return\n"					\
-		     "thread_return:\n\t"					    \
-		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
-		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
-		     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
-		     "movq %%rax,%%rdi\n\t" 					  \
-		     "jc   ret_from_fork\n\t"					  \
-		     RESTORE_CONTEXT						    \
-		     : "=a" (last)					  	  \
-		     : [next] "S" (next), [prev] "D" (prev),			  \
-		       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
-		       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
-		       [tif_fork] "i" (TIF_FORK),			  \
-		       [thread_info] "i" (offsetof(struct task_struct, stack)), \
-		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
-		     : "memory", "cc" __EXTRA_CLOBBER)
-    
-#endif	/* __KERNEL__ */
 
 #include <linux/irqflags.h>
 


* [PATCH 9/9] unify system.h
From: Glauber de Oliveira Costa @ 2007-12-05 15:08 UTC
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch finishes the unification of the system.h file.
i386 needs a constant to be defined, and it is defined inside an ifdef.

Other than that, pretty much nothing but includes is left in the
arch-specific headers, and they are deleted.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |   10 ++++++++--
 include/asm-x86/system_32.h |   15 ---------------
 include/asm-x86/system_64.h |   10 ----------
 3 files changed, 8 insertions(+), 27 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index fd2abfd..9460db4 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -2,8 +2,12 @@
 #define _ASM_X86_SYSTEM_H_
 
 #include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
 
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
 
 #ifdef CONFIG_X86_32
 #define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
@@ -34,7 +38,10 @@ extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
 		      "2" (prev), "d" (next));				\
 } while (0)
 
-# include "system_32.h"
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
 #else
 #define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
 #define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
@@ -69,7 +76,6 @@ extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
 	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
 	     : "memory", "cc" __EXTRA_CLOBBER)
-# include "system_64.h"
 #endif
 
 #ifdef __KERNEL__
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
deleted file mode 100644
index 83af464..0000000
--- a/include/asm-x86/system_32.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-
-#include <linux/irqflags.h>
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-
-#endif
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
deleted file mode 100644
index 8d2059f..0000000
--- a/include/asm-x86/system_64.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-
-#include <linux/irqflags.h>
-
-#endif
-- 
1.4.4.2



* Re: [PATCH 0/9 - v2] Integrate system.h
From: Ingo Molnar @ 2007-12-05 20:22 UTC
  To: Glauber de Oliveira Costa
  Cc: linux-kernel, akpm, glommer, tglx, ehabkost, jeremy, avi,
	anthony, virtualization, rusty, ak, chrisw, rostedt, hpa


* Glauber de Oliveira Costa <gcosta@redhat.com> wrote:

> At Ingo's request, here goes a new patchset that actually applies 
> on top of the x86 tree (mm branch). Besides that, I've also 
> included a patch that removes the cr8 references, as Andi suggested.

thanks - i've picked them up.

	Ingo


* Re: [PATCH 1/9] remove volatile keyword from clflush.
From: Jeremy Fitzhardinge @ 2007-12-06 21:11 UTC
  To: Glauber de Oliveira Costa
  Cc: linux-kernel, akpm, glommer, tglx, mingo, ehabkost, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa

Glauber de Oliveira Costa wrote:
> The p parameter is an explicit memory reference, and is
> enough to prevent gcc from misbehaving here. The volatile
> keyword seems completely unnecessary.
>   

The usual reason for these types of "volatiles" is to make type checking
happier, since "volatile void *" is compatible with any argument you
might pass.  IOW, if you pass a plain "char *" then the compiler will
promote it to "volatile char *" and not complain, and passing an already
volatile pointer will be OK too.

The volatile isn't there to modify the generated code in any way.
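
(A quick sketch of that point: with a volatile void * parameter both
calls below compile silently; with the plain void * version from this
patch, the last call draws a qualifier warning. The helper names are
made up.)

	void takes_volatile(volatile void *p);
	void takes_plain(void *p);

	void demo(void)
	{
		char buf[64];
		volatile char *vp = buf;

		takes_volatile(buf);	/* ok: volatile added implicitly */
		takes_volatile(vp);	/* ok: already volatile */
		takes_plain(buf);	/* ok */
		takes_plain(vp);	/* warning: passing argument discards
					 * "volatile" qualifier */
	}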

    J

