* [PATCH 0/8] Integrate system.h
@ 2007-12-04 16:06 Glauber de Oliveira Costa
  2007-12-04 16:06 ` [PATCH 1/8] remove volatile keyword from clflush Glauber de Oliveira Costa
  0 siblings, 1 reply; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa

Hi,

Along the same lines as the msr.h integration, here goes a series for
system.h. Again, once the headers are turned into one, the paravirt
pieces related to system.h come for free.





* [PATCH 1/8] remove volatile keyword from clflush.
  2007-12-04 16:06 [PATCH 0/8] Integrate system.h Glauber de Oliveira Costa
@ 2007-12-04 16:06 ` Glauber de Oliveira Costa
  2007-12-04 16:06   ` [PATCH 2/8] put together equal pieces of system.h Glauber de Oliveira Costa
  0 siblings, 1 reply; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

The __p parameter is an explicit memory reference, and is enough to
prevent gcc from being nasty here. The volatile keyword seems
completely unneeded.
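
For illustration (a sketch, not part of the patch, with the sparse
__force annotation dropped): the "+m" constraint alone already forces
the memory access, since it makes the pointed-to byte both an input
and an output of the asm:

	static inline void clflush_sketch(void *p)
	{
		/* "+m" tells gcc the asm reads and writes *p, so the
		 * access can be neither elided nor satisfied from a
		 * register copy */
		asm volatile("clflush %0" : "+m" (*(char *)p));
	}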

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system_32.h |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index ef84688..27e106d 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -161,7 +161,7 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-static inline void clflush(volatile void *__p)
+static inline void clflush(void *__p)
 {
 	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
 }
-- 
1.4.4.2



* [PATCH 2/8] put together equal pieces of system.h
  2007-12-04 16:06 ` [PATCH 1/8] remove volatile keyword from clflush Glauber de Oliveira Costa
@ 2007-12-04 16:06   ` Glauber de Oliveira Costa
  2007-12-04 16:06     ` [PATCH 3/8] unify load_segment macro Glauber de Oliveira Costa
  0 siblings, 1 reply; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch puts together the pieces of system_{32,64}.h that look the
same. It's the first step towards the integration of this file.
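
As a usage illustration for one of the consolidated helpers (a sketch;
the variable is made up):

	unsigned long seg;

	/* copy the current %gs selector into seg without touching
	 * the segment register itself */
	savesegment(gs, seg);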

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 arch/x86/kernel/process_64.c |    2 +-
 include/asm-x86/system.h     |   70 ++++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/system_32.h  |   59 -----------------------------------
 include/asm-x86/system_64.h  |   12 -------
 4 files changed, 71 insertions(+), 72 deletions(-)

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6309b27..8924790 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -106,7 +106,7 @@ void exit_idle(void)
  * We use this if we don't have any better
  * idle routine..
  */
-static void default_idle(void)
+void default_idle(void)
 {
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 692562b..6e9491d 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -1,5 +1,75 @@
+#ifndef _ASM_X86_SYSTEM_H_
+#define _ASM_X86_SYSTEM_H_
+
+#include <asm/asm.h>
+
 #ifdef CONFIG_X86_32
 # include "system_32.h"
 #else
 # include "system_64.h"
 #endif
+
+#ifdef __KERNEL__
+#define _set_base(addr, base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %%dl,%2\n\t" \
+	"movb %%dh,%3" \
+	:"=&d" (__pr) \
+	:"m" (*((addr)+2)), \
+	 "m" (*((addr)+4)), \
+	 "m" (*((addr)+7)), \
+	 "0" (base) \
+	); } while (0)
+
+#define _set_limit(addr, limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %2,%%dh\n\t" \
+	"andb $0xf0,%%dh\n\t" \
+	"orb %%dh,%%dl\n\t" \
+	"movb %%dl,%2" \
+	:"=&d" (__lr) \
+	:"m" (*(addr)), \
+	 "m" (*((addr)+6)), \
+	 "0" (limit) \
+	); } while (0)
+
+#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
+#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+	asm volatile("mov %%" #seg ",%0":"=rm" (value))
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	__asm__("lsll %1,%0"
+		:"=r" (__limit):"r" (segment));
+	return __limit+1;
+}
+#endif /* __KERNEL__ */
+
+static inline void clflush(void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern int es7000_plat;
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+void __show_registers(struct pt_regs *, int all);
+
+#endif
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 27e106d..717aeb9 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -34,34 +34,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		      "2" (prev), "d" (next));				\
 } while (0)
 
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %%dl,%2\n\t" \
-	"movb %%dh,%3" \
-	:"=&d" (__pr) \
-	:"m" (*((addr)+2)), \
-	 "m" (*((addr)+4)), \
-	 "m" (*((addr)+7)), \
-         "0" (base) \
-        ); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %2,%%dh\n\t" \
-	"andb $0xf0,%%dh\n\t" \
-	"orb %%dh,%%dl\n\t" \
-	"movb %%dl,%2" \
-	:"=&d" (__lr) \
-	:"m" (*(addr)), \
-	 "m" (*((addr)+6)), \
-	 "0" (limit) \
-        ); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
-
 /*
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..
@@ -83,12 +55,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 		".previous"			\
 		: :"rm" (value))
 
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value) \
-	asm volatile("mov %%" #seg ",%0":"=rm" (value))
-
 
 static inline void native_clts(void)
 {
@@ -161,11 +127,6 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-static inline void clflush(void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -190,15 +151,6 @@ static inline void clflush(void *__p)
 
 #endif	/* __KERNEL__ */
 
-static inline unsigned long get_limit(unsigned long segment)
-{
-	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
 
 /*
  * Force strict CPU ordering.
@@ -305,16 +257,5 @@ static inline unsigned long get_limit(unsigned long segment)
  * disable hlt during certain critical i/o operations
  */
 #define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern int es7000_plat;
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-void default_idle(void);
-void __show_registers(struct pt_regs *, int all);
 
 #endif
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 4cb2384..f340060 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -134,13 +134,6 @@ static inline void write_cr8(unsigned long val)
 
 #endif	/* __KERNEL__ */
 
-static inline void clflush(volatile void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	barrier()
@@ -170,9 +163,4 @@ static inline void clflush(volatile void *__p)
 
 #include <linux/irqflags.h>
 
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
 #endif
-- 
1.4.4.2



* [PATCH 3/8] unify load_segment macro
  2007-12-04 16:06   ` [PATCH 2/8] put together equal pieces of system.h Glauber de Oliveira Costa
@ 2007-12-04 16:06     ` Glauber de Oliveira Costa
  2007-12-04 16:06       ` [PATCH 4/8] unify paravirt parts of system.h Glauber de Oliveira Costa
  0 siblings, 1 reply; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch unifies the loadsegment() macro, making it equal in both
the x86_64 and i386 architectures. The common version goes to
system.h, and the old ones are deleted.
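
For reference, a usage sketch (the call site and selector value are
made up): the fixup section is what lets a bad selector degrade to the
null segment instead of an oops.

	unsigned int sel = 0x33;	/* made-up selector value */

	/* if sel cannot be loaded, the exception fixup loads the
	 * null segment (0) instead of killing the kernel */
	loadsegment(fs, sel);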

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |   21 +++++++++++++++++++++
 include/asm-x86/system_32.h |   22 ----------------------
 include/asm-x86/system_64.h |   20 --------------------
 3 files changed, 21 insertions(+), 42 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 6e9491d..1ac6088 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -39,6 +39,27 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
 
 /*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+#define loadsegment(seg, value)			\
+	asm volatile("\n"			\
+		"1:\t"				\
+		"movl %k0,%%" #seg "\n"		\
+		"2:\n"				\
+		".section .fixup,\"ax\"\n"	\
+		"3:\t"				\
+		"movl %k1, %%" #seg "\n\t"	\
+		"jmp 2b\n"			\
+		".previous\n"			\
+		".section __ex_table,\"a\"\n\t"	\
+		_ASM_ALIGN "\n\t"		\
+		_ASM_PTR " 1b,3b\n"		\
+		".previous"			\
+		: :"r" (value), "r" (0))
+
+
+/*
  * Save a segment register away
  */
 #define savesegment(seg, value) \
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 717aeb9..a0641a3 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -34,28 +34,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		      "2" (prev), "d" (next));				\
 } while (0)
 
-/*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value)			\
-	asm volatile("\n"			\
-		"1:\t"				\
-		"mov %0,%%" #seg "\n"		\
-		"2:\n"				\
-		".section .fixup,\"ax\"\n"	\
-		"3:\t"				\
-		"pushl $0\n\t"			\
-		"popl %%" #seg "\n\t"		\
-		"jmp 2b\n"			\
-		".previous\n"			\
-		".section __ex_table,\"a\"\n\t"	\
-		".align 4\n\t"			\
-		".long 1b,3b\n"			\
-		".previous"			\
-		: :"rm" (value))
-
-
 static inline void native_clts(void)
 {
 	asm volatile ("clts");
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index f340060..da46059 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -43,26 +43,6 @@
 extern void load_gs_index(unsigned); 
 
 /*
- * Load a segment. Fall back on loading the zero
- * segment if something goes wrong..
- */
-#define loadsegment(seg,value)	\
-	asm volatile("\n"			\
-		"1:\t"				\
-		"movl %k0,%%" #seg "\n"		\
-		"2:\n"				\
-		".section .fixup,\"ax\"\n"	\
-		"3:\t"				\
-		"movl %1,%%" #seg "\n\t" 	\
-		"jmp 2b\n"			\
-		".previous\n"			\
-		".section __ex_table,\"a\"\n\t"	\
-		".align 8\n\t"			\
-		".quad 1b,3b\n"			\
-		".previous"			\
-		: :"r" (value), "r" (0))
-
-/*
  * Clear and set 'TS' bit respectively
  */
 #define clts() __asm__ __volatile__ ("clts")
-- 
1.4.4.2



* [PATCH 4/8] unify paravirt parts of system.h
  2007-12-04 16:06     ` [PATCH 3/8] unify load_segment macro Glauber de Oliveira Costa
@ 2007-12-04 16:06       ` Glauber de Oliveira Costa
  2007-12-04 16:06         ` [PATCH 5/8] remove unused macro Glauber de Oliveira Costa
  2007-12-04 19:18         ` [PATCH 4/8] unify paravirt parts of system.h Avi Kivity
  0 siblings, 2 replies; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch moves the i386 control register manipulation functions,
together with the wbinvd and clts functions, to system.h. They are
essentially the same as in x86_64, except for the cr8 register, which
we add.

With this, the system.h paravirt pieces come for free in x86_64.
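
As a usage illustration (a sketch, not part of the patch): the classic
full non-global TLB flush is just a cr3 round-trip through these
accessors, which is why their ordering guarantees matter.

	static inline void flush_tlb_sketch(void)
	{
		/* writing cr3 back to itself invalidates all
		 * non-global TLB entries */
		write_cr3(read_cr3());
	}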

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |  124 +++++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/system_32.h |   94 --------------------------------
 include/asm-x86/system_64.h |   73 -------------------------
 3 files changed, 124 insertions(+), 167 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 1ac6088..f1fdc55 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -3,6 +3,8 @@
 
 #include <asm/asm.h>
 
+#include <linux/kernel.h>
+
 #ifdef CONFIG_X86_32
 # include "system_32.h"
 #else
@@ -38,6 +40,8 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
 #define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
 
+extern void load_gs_index(unsigned);
+
 /*
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..
@@ -72,6 +76,126 @@ static inline unsigned long get_limit(unsigned long segment)
 		:"=r" (__limit):"r" (segment));
 	return __limit+1;
 }
+
+static inline void native_clts(void)
+{
+	asm volatile ("clts");
+}
+
+/*
+ * Volatile isn't enough to prevent the compiler from reordering the
+ * read/write functions for the control registers and messing everything up.
+ * A memory clobber would solve the problem, but would prevent reordering of
+ * all loads stores around it, which can hurt performance. Solution is to
+ * use a variable and mimic reads and writes to it to enforce serialization
+ */
+static unsigned long __force_order;
+
+static inline unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
+	return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist. In x86_64, a cr4 always
+	 * exists, so it will never fail. */
+#ifdef CONFIG_X86_32
+	asm volatile("1: mov %%cr4, %0		\n"
+		"2:				\n"
+		".section __ex_table,\"a\"	\n"
+		".long 1b,2b			\n"
+		".previous			\n"
+		: "=r" (val), "=m" (__force_order) : "0" (0));
+#else
+	val = native_read_cr4();
+#endif
+	return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr8(void)
+{
+	unsigned long cr8;
+	asm volatile("mov %%cr8,%0" : "=r" (cr8), "=m" (__force_order));
+	return cr8;
+}
+
+static inline void native_write_cr8(unsigned long val)
+{
+	asm volatile("mov %0,%%cr8" : : "r" (val));
+}
+
+static inline void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define read_cr0()	(native_read_cr0())
+#define write_cr0(x)	(native_write_cr0(x))
+#define read_cr2()	(native_read_cr2())
+#define write_cr2(x)	(native_write_cr2(x))
+#define read_cr3()	(native_read_cr3())
+#define write_cr3(x)	(native_write_cr3(x))
+#define read_cr4()	(native_read_cr4())
+#define read_cr4_safe()	(native_read_cr4_safe())
+#define write_cr4(x)	(native_write_cr4(x))
+#define read_cr8()	(native_read_cr8())
+#define write_cr8(x)	(native_write_cr8(x))
+#define wbinvd()	(native_wbinvd())
+
+/* Clear the 'TS' bit */
+#define clts()		(native_clts())
+
+#endif/* CONFIG_PARAVIRT */
+
+#define stts() write_cr0(8 | read_cr0())
+
 #endif /* __KERNEL__ */
 
 static inline void clflush(void *__p)
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index a0641a3..6c69567 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
-#include <linux/kernel.h>
 #include <asm/segment.h>
 #include <asm/cpufeature.h>
 #include <asm/cmpxchg.h>
@@ -34,99 +33,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		      "2" (prev), "d" (next));				\
 } while (0)
 
-static inline void native_clts(void)
-{
-	asm volatile ("clts");
-}
-
-static inline unsigned long native_read_cr0(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static inline void native_write_cr0(unsigned long val)
-{
-	asm volatile("movl %0,%%cr0": :"r" (val));
-}
-
-static inline unsigned long native_read_cr2(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static inline void native_write_cr2(unsigned long val)
-{
-	asm volatile("movl %0,%%cr2": :"r" (val));
-}
-
-static inline unsigned long native_read_cr3(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static inline void native_write_cr3(unsigned long val)
-{
-	asm volatile("movl %0,%%cr3": :"r" (val));
-}
-
-static inline unsigned long native_read_cr4(void)
-{
-	unsigned long val;
-	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
-	return val;
-}
-
-static inline unsigned long native_read_cr4_safe(void)
-{
-	unsigned long val;
-	/* This could fault if %cr4 does not exist */
-	asm volatile("1: movl %%cr4, %0		\n"
-		"2:				\n"
-		".section __ex_table,\"a\"	\n"
-		".long 1b,2b			\n"
-		".previous			\n"
-		: "=r" (val): "0" (0));
-	return val;
-}
-
-static inline void native_write_cr4(unsigned long val)
-{
-	asm volatile("movl %0,%%cr4": :"r" (val));
-}
-
-static inline void native_wbinvd(void)
-{
-	asm volatile("wbinvd": : :"memory");
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define read_cr0()	(native_read_cr0())
-#define write_cr0(x)	(native_write_cr0(x))
-#define read_cr2()	(native_read_cr2())
-#define write_cr2(x)	(native_write_cr2(x))
-#define read_cr3()	(native_read_cr3())
-#define write_cr3(x)	(native_write_cr3(x))
-#define read_cr4()	(native_read_cr4())
-#define read_cr4_safe()	(native_read_cr4_safe())
-#define write_cr4(x)	(native_write_cr4(x))
-#define wbinvd()	(native_wbinvd())
-
-/* Clear the 'TS' bit */
-#define clts()		(native_clts())
-
-#endif/* CONFIG_PARAVIRT */
-
-/* Set the 'TS' bit */
-#define stts() write_cr0(8 | read_cr0())
-
 #endif	/* __KERNEL__ */
 
 
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index da46059..3da8ec2 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
-#include <linux/kernel.h>
 #include <asm/segment.h>
 #include <asm/cmpxchg.h>
 
@@ -39,78 +38,6 @@
 		       [thread_info] "i" (offsetof(struct task_struct, stack)), \
 		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
 		     : "memory", "cc" __EXTRA_CLOBBER)
-    
-extern void load_gs_index(unsigned); 
-
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
-
-static inline unsigned long read_cr0(void)
-{ 
-	unsigned long cr0;
-	asm volatile("movq %%cr0,%0" : "=r" (cr0));
-	return cr0;
-}
-
-static inline void write_cr0(unsigned long val) 
-{ 
-	asm volatile("movq %0,%%cr0" :: "r" (val));
-}
-
-static inline unsigned long read_cr2(void)
-{
-	unsigned long cr2;
-	asm volatile("movq %%cr2,%0" : "=r" (cr2));
-	return cr2;
-}
-
-static inline void write_cr2(unsigned long val)
-{
-	asm volatile("movq %0,%%cr2" :: "r" (val));
-}
-
-static inline unsigned long read_cr3(void)
-{ 
-	unsigned long cr3;
-	asm volatile("movq %%cr3,%0" : "=r" (cr3));
-	return cr3;
-}
-
-static inline void write_cr3(unsigned long val)
-{
-	asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
-}
-
-static inline unsigned long read_cr4(void)
-{ 
-	unsigned long cr4;
-	asm volatile("movq %%cr4,%0" : "=r" (cr4));
-	return cr4;
-}
-
-static inline void write_cr4(unsigned long val)
-{ 
-	asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
-}
-
-static inline unsigned long read_cr8(void)
-{
-	unsigned long cr8;
-	asm volatile("movq %%cr8,%0" : "=r" (cr8));
-	return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#define stts() write_cr0(8 | read_cr0())
-
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory")
 
 #endif	/* __KERNEL__ */
 
-- 
1.4.4.2



* [PATCH 5/8] remove unused macro
  2007-12-04 16:06       ` [PATCH 4/8] unify paravirt parts of system.h Glauber de Oliveira Costa
@ 2007-12-04 16:06         ` Glauber de Oliveira Costa
  2007-12-04 16:06           ` [PATCH 6/8] unify smp parts of system.h Glauber de Oliveira Costa
  2007-12-04 19:18         ` [PATCH 4/8] unify paravirt parts of system.h Avi Kivity
  1 sibling, 1 reply; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

Mr. Grep says warn_if_not_ulong() is no longer used anywhere in the
code. So, we remove it.
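
For context, the macro type-checked its argument by comparing pointer
types; a sketch of what it used to catch (variables made up):

	unsigned long ok = 0;
	unsigned int bad = 0;

	warn_if_not_ulong(ok);	/* compiles silently */
	warn_if_not_ulong(bad);	/* gcc: "comparison of distinct
				 * pointer types lacks a cast" */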

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system_64.h |    2 --
 1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 3da8ec2..bb058ad 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -66,8 +66,6 @@
 #define read_barrier_depends()	do {} while(0)
 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 
-#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-
 #include <linux/irqflags.h>
 
 #endif
-- 
1.4.4.2



* [PATCH 6/8] unify smp parts of system.h
  2007-12-04 16:06         ` [PATCH 5/8] remove unused macro Glauber de Oliveira Costa
@ 2007-12-04 16:06           ` Glauber de Oliveira Costa
  2007-12-04 16:06             ` [PATCH 7/8] move switch_to macro to system.h Glauber de Oliveira Costa
  0 siblings, 1 reply; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

The memory barrier parts of system.h are not very different between
i386 and x86_64, the main difference being the availability of
instructions, which we handle with ifdefs.

They are consolidated in the system.h file and then removed from
the arch-specific headers.
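
For reference, a sketch of the store/load pairing these barriers exist
for (names are made up; compute() and use() are hypothetical):

	static int data, ready;

	/* producer */
	data = compute();
	smp_wmb();	/* order the data store before the flag store */
	ready = 1;

	/* consumer */
	while (!ready)
		cpu_relax();
	smp_rmb();	/* order the flag load before the data load */
	use(data);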

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |  105 +++++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/system_32.h |   99 ----------------------------------------
 include/asm-x86/system_64.h |   25 ----------
 3 files changed, 105 insertions(+), 124 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index f1fdc55..ecb782b 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -217,4 +217,109 @@ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 void default_idle(void);
 void __show_registers(struct pt_regs *, int all);
 
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+/*
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb() 	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb()	rmb()
+#else
+# define smp_rmb()	barrier()
+#endif
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb() 	wmb()
+#else
+# define smp_wmb()	barrier()
+#endif
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif
+
+
 #endif
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 6c69567..1201880 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -36,105 +36,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 #endif	/* __KERNEL__ */
 
 
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * For now, "wmb()" doesn't actually do anything, as all
- * Intel CPU's follow what Intel calls a *Processor Order*,
- * in which all writes are seen in the program order even
- * outside the CPU.
- *
- * I expect future Intel CPU's to have a weaker ordering,
- * but I'd also expect them to finally get their act together
- * and add some real memory barriers if so.
- *
- * Some non intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
- 
-
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb()	rmb()
-#else
-# define smp_rmb()	barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() 	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
 #include <linux/irqflags.h>
 
 /*
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index bb058ad..80be206 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -41,31 +41,6 @@
 
 #endif	/* __KERNEL__ */
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do {} while(0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do {} while(0)
-#endif
-
-    
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#define mb() 	asm volatile("mfence":::"memory")
-#define rmb()	asm volatile("lfence":::"memory")
-#define wmb()	asm volatile("sfence" ::: "memory")
-
-#define read_barrier_depends()	do {} while(0)
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-
 #include <linux/irqflags.h>
 
 #endif
-- 
1.4.4.2



* [PATCH 7/8] move switch_to macro to system.h
  2007-12-04 16:06           ` [PATCH 6/8] unify smp parts of system.h Glauber de Oliveira Costa
@ 2007-12-04 16:06             ` Glauber de Oliveira Costa
  2007-12-04 16:06               ` [PATCH 8/8] unify system.h Glauber de Oliveira Costa
  0 siblings, 1 reply; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch moves the switch_to() macro to system.h.

As these macros are fundamentally different between i386 and x86_64,
they are enclosed in an ifdef.
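
For orientation, both variants are invoked the same way from the
scheduler's context_switch() (a sketch of the call site):

	/* prev is both an input and, via "last", an output: after the
	 * stack switch it names the task we switched away from */
	switch_to(prev, next, prev);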

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |   61 +++++++++++++++++++++++++++++++++++++++++++
 include/asm-x86/system_32.h |   31 ----------------------
 include/asm-x86/system_64.h |   37 --------------------------
 3 files changed, 61 insertions(+), 68 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index ecb782b..da4bcd1 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -6,8 +6,69 @@
 #include <linux/kernel.h>
 
 #ifdef CONFIG_X86_32
+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
+
+struct task_struct; /* one of the stranger aspects of C forward declarations */
+extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
+						struct task_struct *next));
+
+/*
+ * Saving eflags is important. It switches not only IOPL between tasks,
+ * it also protects other tasks from NT leaking through sysenter etc.
+ */
+#define switch_to(prev, next, last) do {				\
+	unsigned long esi, edi;						\
+	asm volatile("pushfl\n\t"		/* Save flags */	\
+		     "pushl %%ebp\n\t"					\
+		     "movl %%esp,%0\n\t"	/* save ESP */		\
+		     "movl %5,%%esp\n\t"	/* restore ESP */	\
+		     "movl $1f,%1\n\t"		/* save EIP */		\
+		     "pushl %6\n\t"		/* restore EIP */	\
+		     "jmp __switch_to\n"				\
+		     "1:\t"						\
+		     "popl %%ebp\n\t"					\
+		     "popfl"						\
+		     :"=m" (prev->thread.esp), "=m" (prev->thread.eip),	\
+		      "=a" (last), "=S" (esi), "=D" (edi)		\
+		     :"m" (next->thread.esp), "m" (next->thread.eip),	\
+		      "2" (prev), "d" (next));				\
+} while (0)
+
 # include "system_32.h"
 #else
+#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
+#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
+
+#define __EXTRA_CLOBBER  \
+	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+	  "r12", "r13", "r14", "r15"
+
+/* Save restore flags to clear handle leaking NT */
+#define switch_to(prev, next, last) \
+	asm volatile(SAVE_CONTEXT					  \
+	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
+	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
+	     "call __switch_to\n\t"					  \
+	     ".globl thread_return\n"					  \
+	     "thread_return:\n\t"					  \
+	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
+	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
+	     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
+	     "movq %%rax,%%rdi\n\t" 					  \
+	     "jc   ret_from_fork\n\t"					  \
+	     RESTORE_CONTEXT						  \
+	     : "=a" (last)					  	  \
+	     : [next] "S" (next), [prev] "D" (prev),			  \
+	       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)),\
+	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
+	       [tif_fork] "i" (TIF_FORK),			  	  \
+	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
+	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
+	     : "memory", "cc" __EXTRA_CLOBBER)
 # include "system_64.h"
 #endif
 
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 1201880..83af464 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -5,37 +5,6 @@
 #include <asm/cpufeature.h>
 #include <asm/cmpxchg.h>
 
-#ifdef __KERNEL__
-#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
-
-struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
-extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-
-/*
- * Saving eflags is important. It switches not only IOPL between tasks,
- * it also protects other tasks from NT leaking through sysenter etc.
- */
-#define switch_to(prev,next,last) do {					\
-	unsigned long esi,edi;						\
-	asm volatile("pushfl\n\t"		/* Save flags */	\
-		     "pushl %%ebp\n\t"					\
-		     "movl %%esp,%0\n\t"	/* save ESP */		\
-		     "movl %5,%%esp\n\t"	/* restore ESP */	\
-		     "movl $1f,%1\n\t"		/* save EIP */		\
-		     "pushl %6\n\t"		/* restore EIP */	\
-		     "jmp __switch_to\n"				\
-		     "1:\t"						\
-		     "popl %%ebp\n\t"					\
-		     "popfl"						\
-		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
-		      "=a" (last),"=S" (esi),"=D" (edi)			\
-		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
-		      "2" (prev), "d" (next));				\
-} while (0)
-
-#endif	/* __KERNEL__ */
-
-
 #include <linux/irqflags.h>
 
 /*
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 80be206..89af856 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -4,43 +4,6 @@
 #include <asm/segment.h>
 #include <asm/cmpxchg.h>
 
-#ifdef __KERNEL__
-
-#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
-#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
-
-/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
-
-#define __EXTRA_CLOBBER  \
-	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
-
-/* Save restore flags to clear handle leaking NT */
-#define switch_to(prev,next,last) \
-	asm volatile(SAVE_CONTEXT						    \
-		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
-		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
-		     "call __switch_to\n\t"					  \
-		     ".globl thread_return\n"					\
-		     "thread_return:\n\t"					    \
-		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
-		     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
-		     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
-		     "movq %%rax,%%rdi\n\t" 					  \
-		     "jc   ret_from_fork\n\t"					  \
-		     RESTORE_CONTEXT						    \
-		     : "=a" (last)					  	  \
-		     : [next] "S" (next), [prev] "D" (prev),			  \
-		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
-		       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
-		       [tif_fork] "i" (TIF_FORK),			  \
-		       [thread_info] "i" (offsetof(struct task_struct, stack)), \
-		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))   \
-		     : "memory", "cc" __EXTRA_CLOBBER)
-
-#endif	/* __KERNEL__ */
-
 #include <linux/irqflags.h>
 
 #endif
-- 
1.4.4.2



* [PATCH 8/8] unify system.h
  2007-12-04 16:06             ` [PATCH 7/8] move switch_to macro to system.h Glauber de Oliveira Costa
@ 2007-12-04 16:06               ` Glauber de Oliveira Costa
  0 siblings, 0 replies; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 16:06 UTC (permalink / raw)
  To: linux-kernel
  Cc: akpm, glommer, tglx, mingo, ehabkost, jeremy, avi, anthony,
	virtualization, rusty, ak, chrisw, rostedt, hpa,
	Glauber de Oliveira Costa

This patch finishes the unification of the system.h file.
i386 needs a constant to be defined, and it is defined inside an ifdef.

Other than that, pretty much nothing but includes is left in the
arch-specific headers, and they are deleted.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
---
 include/asm-x86/system.h    |   10 ++++++++--
 include/asm-x86/system_32.h |   15 ---------------
 include/asm-x86/system_64.h |    9 ---------
 3 files changed, 8 insertions(+), 26 deletions(-)

diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index da4bcd1..1a85892 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -2,8 +2,12 @@
 #define _ASM_X86_SYSTEM_H_
 
 #include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
 
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
 
 #ifdef CONFIG_X86_32
 #define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
@@ -34,7 +38,10 @@ extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
 		      "2" (prev), "d" (next));				\
 } while (0)
 
-# include "system_32.h"
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#define HAVE_DISABLE_HLT
 #else
 #define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
 #define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
@@ -69,7 +76,6 @@ extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
 	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
 	     : "memory", "cc" __EXTRA_CLOBBER)
-# include "system_64.h"
 #endif
 
 #ifdef __KERNEL__
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
deleted file mode 100644
index 83af464..0000000
--- a/include/asm-x86/system_32.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-
-#include <linux/irqflags.h>
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-
-#endif
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
deleted file mode 100644
index 89af856..0000000
--- a/include/asm-x86/system_64.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_SYSTEM_H
-#define __ASM_SYSTEM_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-#include <linux/irqflags.h>
-
-#endif
-- 
1.4.4.2



* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-04 16:06       ` [PATCH 4/8] unify paravirt parts of system.h Glauber de Oliveira Costa
  2007-12-04 16:06         ` [PATCH 5/8] remove unused macro Glauber de Oliveira Costa
@ 2007-12-04 19:18         ` Avi Kivity
  2007-12-04 19:34           ` Andi Kleen
  2007-12-04 19:41           ` Glauber de Oliveira Costa
  1 sibling, 2 replies; 21+ messages in thread
From: Avi Kivity @ 2007-12-04 19:18 UTC (permalink / raw)
  To: Glauber de Oliveira Costa
  Cc: linux-kernel, akpm, glommer, tglx, mingo, ehabkost, jeremy,
	anthony, virtualization, rusty, ak, chrisw, rostedt, hpa

Glauber de Oliveira Costa wrote:
> This patch moves the i386 control registers manipulation functions,
> wbinvd, and clts functions to system.h. They are essentially the same
> as in x86_64, except for the cr8 register, which we add.
>
> +
> +static inline unsigned long native_read_cr8(void)
> +{
> +	unsigned long cr8;
> +	asm volatile("mov %%cr8,%0" : "=r" (cr8), "=m" (__force_order));
> +	return cr8;
> +}
> +
>   

There is no cr8 register on i386.  This had better be protected by an 
#ifdef.

(you're likely not getting an error since it's a static inline, so the 
asm is never emitted)
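
A sketch of the effect (assuming gcc semantics):

	/* in a 32-bit translation unit, never called */
	static inline unsigned long bogus_cr8_read(void)
	{
		unsigned long v;
		asm volatile("mov %%cr8,%0" : "=r" (v));
		return v;
	}
	/* no caller, so gcc emits no body and the assembler never
	 * sees the invalid %cr8 reference */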

-- 
Any sufficiently difficult bug is indistinguishable from a feature.



* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-04 19:18         ` [PATCH 4/8] unify paravirt parts of system.h Avi Kivity
@ 2007-12-04 19:34           ` Andi Kleen
  2007-12-05 16:30             ` Pavel Machek
  2007-12-04 19:41           ` Glauber de Oliveira Costa
  1 sibling, 1 reply; 21+ messages in thread
From: Andi Kleen @ 2007-12-04 19:34 UTC (permalink / raw)
  To: Avi Kivity
  Cc: Glauber de Oliveira Costa, linux-kernel, akpm, glommer, tglx,
	mingo, ehabkost, jeremy, anthony, virtualization, rusty, ak,
	chrisw, rostedt, hpa

On Tue, Dec 04, 2007 at 09:18:33PM +0200, Avi Kivity wrote:
> Glauber de Oliveira Costa wrote:
>> This patch moves the i386 control registers manipulation functions,
>> wbinvd, and clts functions to system.h. They are essentially the same
>> as in x86_64, except for the cr8 register, which we add.
>>
>> +
>> +static inline unsigned long native_read_cr8(void)
>> +{
>> +	unsigned long cr8;
>> +	asm volatile("mov %%cr8,%0" : "=r" (cr8), "=m" (__force_order));
>> +	return cr8;
>> +}
>> +
>>   
>
> There is no cr8 register on i386.  This had better be protected by an 
> #ifdef.
>
> (you're likely not getting an error since it's a static inline, so the asm 
> is never emitted)

Linux never uses that register. The only user is suspend save/restore,
but that's bogus because it wasn't ever initialized by Linux in the
first place. It could probably all be safely removed.

-Andi



* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-04 19:18         ` [PATCH 4/8] unify paravirt parts of system.h Avi Kivity
  2007-12-04 19:34           ` Andi Kleen
@ 2007-12-04 19:41           ` Glauber de Oliveira Costa
  2007-12-04 22:14             ` Denys Vlasenko
  1 sibling, 1 reply; 21+ messages in thread
From: Glauber de Oliveira Costa @ 2007-12-04 19:41 UTC (permalink / raw)
  To: Avi Kivity
  Cc: Glauber de Oliveira Costa, linux-kernel, akpm, tglx, mingo,
	ehabkost, jeremy, anthony, virtualization, rusty, ak, chrisw,
	rostedt, hpa

On Dec 4, 2007 5:18 PM, Avi Kivity <avi@qumranet.com> wrote:
> Glauber de Oliveira Costa wrote:
> > This patch moves the i386 control registers manipulation functions,
> > wbinvd, and clts functions to system.h. They are essentially the same
> > as in x86_64, except for the cr8 register, which we add.
> >
> > +
> > +static inline unsigned long native_read_cr8(void)
> > +{
> > +     unsigned long cr8;
> > +     asm volatile("mov %%cr8,%0" : "=r" (cr8), "=m" (__force_order));
> > +     return cr8;
> > +}
> > +
> >
>
> There is no cr8 register on i386.  This had better be protected by an
> #ifdef.

Sure, I mentioned it in the changelog. I am, however, not sure I agree
it should be enclosed in ifdefs. Jeremy and I discussed it a while
ago, and we seemed to agree that for functions that are exclusive to
one architecture, there is no need for ifdefs: any usage by the other
arch is a bug.

But I admit that I'm not particularly biased here, and I can change
it, if there's agreement that an ifdef here is the way to go.

> (you're likely not getting an error since it's a static inline, so the
> asm is never emitted)
>
Which also means it does not affect the binary in any way: no bigger
code, no nothing. My current approach is to save the ifdefs for pieces
that can in fact save us some resources in the final image. But
again... I can change it.



-- 
Glauber de Oliveira Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-04 19:41           ` Glauber de Oliveira Costa
@ 2007-12-04 22:14             ` Denys Vlasenko
  0 siblings, 0 replies; 21+ messages in thread
From: Denys Vlasenko @ 2007-12-04 22:14 UTC (permalink / raw)
  To: Glauber de Oliveira Costa
  Cc: Avi Kivity, Glauber de Oliveira Costa, linux-kernel, akpm, tglx,
	mingo, ehabkost, jeremy, anthony, virtualization, rusty, ak,
	chrisw, rostedt, hpa

On Tuesday 04 December 2007 11:41, Glauber de Oliveira Costa wrote:
> On Dec 4, 2007 5:18 PM, Avi Kivity <avi@qumranet.com> wrote:
> > There is no cr8 register on i386.  This had better be protected by an
> > #ifdef.
>
> Sure. I mentioned it in the changelog. I, however, am not sure If I
> agree it should be enclosed
> in ifdefs. Me and Jeremy discussed it a while ago, and we seem to
> agree that for those functions
> that are exclusive of one architecture, there were no need for ifdefs.
> Any usage by the other arch
> is a bug.
>
> But I admit that I'm not particularly biased here, and I can change
> it, if there's agreement that
> an ifdef here is the way to go.
>
> > (you're likely not getting an error since it's a static inline, so the
> > asm is never emitted)
>
> Which also means it does not affect the binary in anyway. No bigger
> code, no nothing.

If future changes mistakenly make 32-bit x86 call native_read_cr8(),
you will get no warning. (Hmmm, maybe as will complain, I'm not sure.)
If it is explicitly ifdefed out for 32 bits, it's easier to detect
misuse.
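
For concreteness, the guard would look something like this (a sketch):

	#ifdef CONFIG_X86_64
	static inline unsigned long native_read_cr8(void)
	{
		unsigned long cr8;
		asm volatile("mov %%cr8,%0" : "=r" (cr8));
		return cr8;
	}
	#endif
	/* a 32-bit caller now gets at least an implicit-declaration
	 * warning instead of compiling silently */
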
--
vda


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-04 19:34           ` Andi Kleen
@ 2007-12-05 16:30             ` Pavel Machek
  2007-12-15 13:17               ` Ingo Molnar
                                 ` (2 more replies)
  0 siblings, 3 replies; 21+ messages in thread
From: Pavel Machek @ 2007-12-05 16:30 UTC (permalink / raw)
  To: Andi Kleen
  Cc: Avi Kivity, Glauber de Oliveira Costa, linux-kernel, akpm,
	glommer, tglx, mingo, ehabkost, jeremy, anthony, virtualization,
	rusty, chrisw, rostedt, hpa, Rafael J. Wysocki

On Tue 2007-12-04 20:34:32, Andi Kleen wrote:
> On Tue, Dec 04, 2007 at 09:18:33PM +0200, Avi Kivity wrote:
> > Glauber de Oliveira Costa wrote:
> >> This patch moves the i386 control registers manipulation functions,
> >> wbinvd, and clts functions to system.h. They are essentially the same
> >> as in x86_64, except for the cr8 register, which we add.
> >>
> >> +
> >> +static inline unsigned long native_read_cr8(void)
> >> +{
> >> +	unsigned long cr8;
> >> +	asm volatile("mov %%cr8,%0" : "=r" (cr8), "=m" (__force_order));
> >> +	return cr8;
> >> +}
> >> +
> >>   
> >
> > There is no cr8 register on i386.  This had better be protected by an 
> > #ifdef.
> >
> > (you're likely not getting an error since it's a static inline, so the asm 
> > is never emitted)
> 
> Linux never uses that register. The only user is suspend save/restore, 
> but that' bogus because it wasn't ever initialized by Linux in the first
> place. It could be probably all safely removed.

It probably is safe to remove... but we currently support '2.8.95
kernel loads/resumes 2.6.24 image'... which would break if 2.8 uses
cr8.

So please keep it if it is not a big problem.

-- 
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-05 16:30             ` Pavel Machek
@ 2007-12-15 13:17               ` Ingo Molnar
  2007-12-17  0:27                 ` Rafael J. Wysocki
  2007-12-15 13:26               ` Andi Kleen
  2007-12-15 20:28               ` H. Peter Anvin
  2 siblings, 1 reply; 21+ messages in thread
From: Ingo Molnar @ 2007-12-15 13:17 UTC (permalink / raw)
  To: Pavel Machek
  Cc: Andi Kleen, Avi Kivity, Glauber de Oliveira Costa, linux-kernel,
	akpm, glommer, tglx, ehabkost, jeremy, anthony, virtualization,
	rusty, chrisw, rostedt, hpa, Rafael J. Wysocki


* Pavel Machek <pavel@ucw.cz> wrote:

> > Linux never uses that register. The only user is suspend
> > save/restore, but that's bogus because it wasn't ever initialized by
> > Linux in the first place. It could probably all be safely removed.
> 
> It probably is safe to remove... but we currently support '2.8.95 
> kernel loads/resumes 2.6.24 image'... which would break if 2.8 uses 
> cr8.
> 
> So please keep it if it is not a big problem.

hm, so __save_processor_state() is in essence an ABI? Could you please 
also send a patch that documents this prominently, in the structure 
itself?

	Ingo


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-05 16:30             ` Pavel Machek
  2007-12-15 13:17               ` Ingo Molnar
@ 2007-12-15 13:26               ` Andi Kleen
  2007-12-15 22:54                 ` Pavel Machek
  2007-12-15 20:28               ` H. Peter Anvin
  2 siblings, 1 reply; 21+ messages in thread
From: Andi Kleen @ 2007-12-15 13:26 UTC (permalink / raw)
  To: Pavel Machek
  Cc: Andi Kleen, Avi Kivity, Glauber de Oliveira Costa, linux-kernel,
	akpm, glommer, tglx, mingo, ehabkost, jeremy, anthony,
	virtualization, rusty, chrisw, rostedt, hpa, Rafael J. Wysocki

> It probably is safe to remove... but we currently support '2.8.95
> kernel loads/resumes 2.6.24 image'... which would break if 2.8 uses
> cr8.

No, it won't. 2.8 would just restore some random useless value.
If 2.8 wants to use CR8, it would have to re-initialize it.
-Andi


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-05 16:30             ` Pavel Machek
  2007-12-15 13:17               ` Ingo Molnar
  2007-12-15 13:26               ` Andi Kleen
@ 2007-12-15 20:28               ` H. Peter Anvin
  2 siblings, 0 replies; 21+ messages in thread
From: H. Peter Anvin @ 2007-12-15 20:28 UTC (permalink / raw)
  To: Pavel Machek
  Cc: Andi Kleen, Avi Kivity, Glauber de Oliveira Costa, linux-kernel,
	akpm, glommer, tglx, mingo, ehabkost, jeremy, anthony,
	virtualization, rusty, chrisw, rostedt, Rafael J. Wysocki

Pavel Machek wrote:
> 
> It probably is safe to remove... but we currently support '2.8.95
> kernel loads/resumes 2.6.24 image'... which would break if 2.8 uses
> cr8.
> 
> So please keep it if it is not a big problem.
> 

Note that CR8 is an alias for the TPR in the APIC.

	-hpa


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-15 13:26               ` Andi Kleen
@ 2007-12-15 22:54                 ` Pavel Machek
  0 siblings, 0 replies; 21+ messages in thread
From: Pavel Machek @ 2007-12-15 22:54 UTC (permalink / raw)
  To: Andi Kleen
  Cc: Andi Kleen, Avi Kivity, Glauber de Oliveira Costa, linux-kernel,
	akpm, glommer, tglx, mingo, ehabkost, jeremy, anthony,
	virtualization, rusty, chrisw, rostedt, hpa, Rafael J. Wysocki

On Sat 2007-12-15 14:26:38, Andi Kleen wrote:
> > It probably is safe to remove... but we currently support '2.8.95
> > kernel loads/resumes 2.6.24 image'... which would break if 2.8 uses
> > cr8.
> 
> No it won't. 2.8 would just restore some random useless value.

Restoring a random value seems wrong. Putting random values into cpu
registers can break stuff, right?

Even if the 2.6.24 image being restored did not set %cr8 itself, it
may depend on %cr8 having a "sane" value.

> If 2.8 wants to use CR8 it would have to re-initialize it

We are talking "2.8 restores 2.6 image" here.

									Pavel
-- 
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-17  0:27                 ` Rafael J. Wysocki
@ 2007-12-17  0:23                   ` Pavel Machek
  2007-12-17  0:58                     ` Rafael J. Wysocki
  0 siblings, 1 reply; 21+ messages in thread
From: Pavel Machek @ 2007-12-17  0:23 UTC (permalink / raw)
  To: Rafael J. Wysocki
  Cc: Ingo Molnar, Andi Kleen, Avi Kivity, Glauber de Oliveira Costa,
	linux-kernel, akpm, glommer, tglx, ehabkost, jeremy, anthony,
	virtualization, rusty, chrisw, rostedt, hpa

On Mon 2007-12-17 01:27:29, Rafael J. Wysocki wrote:
> On Saturday, 15 of December 2007, Ingo Molnar wrote:
> > 
> > * Pavel Machek <pavel@ucw.cz> wrote:
> > 
> > > > Linux never uses that register. The only user is suspend
> > > > save/restore, but that's bogus because it wasn't ever initialized by
> > > > Linux in the first place. It could probably all be safely removed.
> > > 
> > > It probably is safe to remove... but we currently support '2.8.95 
> > > kernel loads/resumes 2.6.24 image'... which would break if 2.8 uses 
> > > cr8.
> > > 
> > > So please keep it if it is not a big problem.
> > 
> > hm, so __save_processor_state() is in essence an ABI? Could you please 
> > also send a patch that documents this prominently, in the structure 
> > itself?
> 
> Hmm, I'm not sure if it really is an ABI part.  It doesn't communicate anything
> outside of the kernel in which it is defined.

Well, it is not "application binary interface", but it is
"kernel-to-kernel binary interface"...

> The problem is, though, that if kernel A is used for resuming kernel B, and
> kernel B doesn't save/restore everything it will need after the resume, then
> things will break if kernel A modifies that.  So, yes, we'll need to document
> that explicitly.

Agreed.
									Pavel
-- 
(english) http://www.livejournal.com/~pavelmachek
(cesky, pictures) http://atrey.karlin.mff.cuni.cz/~pavel/picture/horses/blog.html


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-15 13:17               ` Ingo Molnar
@ 2007-12-17  0:27                 ` Rafael J. Wysocki
  2007-12-17  0:23                   ` Pavel Machek
  0 siblings, 1 reply; 21+ messages in thread
From: Rafael J. Wysocki @ 2007-12-17  0:27 UTC (permalink / raw)
  To: Ingo Molnar
  Cc: Pavel Machek, Andi Kleen, Avi Kivity, Glauber de Oliveira Costa,
	linux-kernel, akpm, glommer, tglx, ehabkost, jeremy, anthony,
	virtualization, rusty, chrisw, rostedt, hpa

On Saturday, 15 of December 2007, Ingo Molnar wrote:
> 
> * Pavel Machek <pavel@ucw.cz> wrote:
> 
> > > Linux never uses that register. The only user is suspend
> > > save/restore, but that's bogus because it wasn't ever initialized by
> > > Linux in the first place. It could probably all be safely removed.
> > 
> > It probably is safe to remove... but we currently support '2.8.95 
> > kernel loads/resumes 2.6.24 image'... which would break if 2.8 uses 
> > cr8.
> > 
> > So please keep it if it is not a big problem.
> 
> hm, so __save_processor_state() is in essence an ABI? Could you please 
> also send a patch that documents this prominently, in the structure 
> itself?

Hmm, I'm not sure if it really is an ABI part.  It doesn't communicate anything
outside of the kernel in which it is defined.

The problem is, though, that if kernel A is used for resuming kernel B, and
kernel B doesn't save/restore everything it will need after the resume, then
things will break if kernel A modifies that.  So, yes, we'll need to document
that explicitly.

Greetings,
Rafael


* Re: [PATCH 4/8] unify paravirt parts of system.h
  2007-12-17  0:23                   ` Pavel Machek
@ 2007-12-17  0:58                     ` Rafael J. Wysocki
  0 siblings, 0 replies; 21+ messages in thread
From: Rafael J. Wysocki @ 2007-12-17  0:58 UTC (permalink / raw)
  To: Pavel Machek
  Cc: Ingo Molnar, Andi Kleen, Avi Kivity, Glauber de Oliveira Costa,
	linux-kernel, akpm, glommer, tglx, ehabkost, jeremy, anthony,
	virtualization, rusty, chrisw, rostedt, hpa

On Monday, 17 of December 2007, Pavel Machek wrote:
> On Mon 2007-12-17 01:27:29, Rafael J. Wysocki wrote:
> > On Saturday, 15 of December 2007, Ingo Molnar wrote:
> > > 
> > > * Pavel Machek <pavel@ucw.cz> wrote:
> > > 
> > > > > Linux never uses that register. The only user is suspend
> > > > > save/restore, but that's bogus because it wasn't ever initialized by
> > > > > Linux in the first place. It could probably all be safely removed.
> > > > 
> > > > It probably is safe to remove... but we currently support '2.8.95 
> > > > kernel loads/resumes 2.6.24 image'... which would break if 2.8 uses 
> > > > cr8.
> > > > 
> > > > So please keep it if it is not a big problem.
> > > 
> > > hm, so __save_processor_state() is in essence an ABI? Could you please 
> > > also send a patch that documents this prominently, in the structure 
> > > itself?
> > 
> > Hmm, I'm not sure if it really is an ABI part.  It doesn't communicate anything
> > outside of the kernel in which it is defined.
> 
> Well, it is not "application binary interface", but it is
> "kernel-to-kernel binary interface"...

Hm, rather a kernel-to-itself interface. ;-)

Rafael

