* [PATCH] 5/8 Move descriptor table management into the sub-arch layer
From: Zachary Amsden @ 2005-08-06  7:18 UTC
  To: akpm, chrisw, linux-kernel, davej, hpa, Riley, pratap, zach, chrisl

i386 Transparent paravirtualization subarch patch #5

This change encapsulates descriptor and task register management.

Diffs against: 2.6.13-rc4-mm1

Signed-off-by: Zachary Amsden <zach@vmware.com>
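
As an illustration of what this hook layer enables (not part of this
patch), a paravirtualized sub-arch could supply its own mach_desc.h
along roughly these lines; hypervisor_load_gdt(), hypervisor_load_idt()
and hypervisor_write_ldt_entry() are invented placeholder names:

/* Hypothetical mach-<subarch>/mach_desc.h -- sketch only */
#ifndef __MACH_DESC_H
#define __MACH_DESC_H

/* Replace the raw lgdt/lidt with notifications to the hypervisor. */
#define load_gdt(dtr)	hypervisor_load_gdt(dtr)
#define load_idt(dtr)	hypervisor_load_idt(dtr)

/* Queue LDT updates to the hypervisor instead of writing the table. */
static inline void write_ldt_entry(void *ldt, int entry,
				   __u32 entry_a, __u32 entry_b)
{
	hypervisor_write_ldt_entry(ldt, entry, entry_a, entry_b);
}

#endif
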
Index: linux-2.6.13/include/asm-i386/desc.h
===================================================================
--- linux-2.6.13.orig/include/asm-i386/desc.h	2005-08-03 16:24:09.000000000 -0700
+++ linux-2.6.13/include/asm-i386/desc.h	2005-08-03 16:31:40.000000000 -0700
@@ -27,19 +27,6 @@
 
 extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
 
-#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
-
-#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
-#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
-
-#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
-#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
-
 /*
  * This is the ldt that every process will get unless we need
  * something other than this.
@@ -58,19 +45,10 @@
 	"rorl $16,%1" \
 	: "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
 
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
-{
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
-		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
-}
+#include <mach_desc.h>
 
 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
-{
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
-}
-
 #define LDT_entry_a(info) \
 	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 
@@ -96,24 +74,6 @@
 	(info)->seg_not_present	== 1	&& \
 	(info)->useable		== 0	)
 
-static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-{
-	__u32 *lp = (__u32 *)((char *)ldt + entry*8);
-	*lp = entry_a;
-	*(lp+1) = entry_b;
-}
-
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
-
 static inline void clear_LDT(void)
 {
 	int cpu = get_cpu();
Index: linux-2.6.13/include/asm-i386/mach-default/mach_desc.h
===================================================================
--- linux-2.6.13.orig/include/asm-i386/mach-default/mach_desc.h	2005-08-03 16:31:40.000000000 -0700
+++ linux-2.6.13/include/asm-i386/mach-default/mach_desc.h	2005-08-03 16:32:52.000000000 -0700
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2005, VMware, Inc.
+ * Copyright (C) 1992-2004, Linus Torvalds and authors
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __MACH_DESC_H
+#define __MACH_DESC_H
+
+#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
+#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
+
+#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
+#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
+
+#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
+#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
+#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
+#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
+
+static inline unsigned int get_TR_desc(void)
+{
+	unsigned int tr;
+	__asm__ ("str %w0":"=q" (tr));
+	return tr;
+}
+
+static inline unsigned int get_LDT_desc(void)
+{
+	unsigned int ldt;
+	__asm__ ("sldt %w0":"=q" (ldt));
+	return ldt;
+}
+
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+{
+	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
+		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+}
+
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+{
+	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+}
+
+static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
+{
+	__u32 *lp = (__u32 *)((char *)ldt + entry*8);
+	*lp = entry_a;
+	*(lp+1) = entry_b;
+}
+
+#if TLS_SIZE != 24
+# error update this code.
+#endif
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+	C(0); C(1); C(2);
+#undef C
+}
+
+#endif


* Re: [PATCH] 5/8 Move descriptor table management into the sub-arch layer
From: Chris Wright @ 2005-08-07  1:10 UTC
  To: Zachary Amsden
  Cc: akpm, chrisw, linux-kernel, davej, hpa, Riley, pratap, chrisl

* Zachary Amsden (zach@vmware.com) wrote:
> This change encapsulates descriptor and task register management.

These will need some merging together, will take a stab tomorrow.


--- linux-2.6.12-xen0-arch.orig/include/asm-i386/desc.h
+++ linux-2.6.12-xen0-arch/include/asm-i386/desc.h
@@ -14,9 +14,6 @@
 
 #include <asm/mmu.h>
 
-extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-
 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 
 struct Xgt_desc_struct {
@@ -37,30 +34,16 @@ extern struct Xgt_desc_struct idt_descr,
 extern struct desc_struct default_ldt[];
 extern void set_intr_gate(unsigned int irq, void * addr);
 
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
-	"movw %%ax,2(%2)\n\t" \
-	"rorl $16,%%eax\n\t" \
-	"movb %%al,4(%2)\n\t" \
-	"movb %4,5(%2)\n\t" \
-	"movb $0,6(%2)\n\t" \
-	"movb %%ah,7(%2)\n\t" \
-	"rorl $16,%%eax" \
-	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+#include <mach_desc.h>
 
 static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
 {
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
 		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
 }
 
 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
-{
-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
-}
-
 #define LDT_entry_a(info) \
 	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
 
@@ -90,39 +73,6 @@ static inline void set_ldt_desc(unsigned
 # error update this code.
 #endif
 
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
-	C(0); C(1); C(2);
-#undef C
-}
-
-static inline void clear_LDT(void)
-{
-	int cpu = get_cpu();
-
-	set_ldt_desc(cpu, &default_ldt[0], 5);
-	load_LDT_desc();
-	put_cpu();
-}
-
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
-{
-	void *segments = pc->ldt;
-	int count = pc->size;
-
-	if (likely(!count)) {
-		segments = &default_ldt[0];
-		count = 5;
-	}
-		
-	set_ldt_desc(cpu, segments, count);
-	load_LDT_desc();
-}
-
 static inline void load_LDT(mm_context_t *pc)
 {
 	int cpu = get_cpu();
--- /dev/null
+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-default/mach_desc.h
@@ -0,0 +1,57 @@
+#ifndef __ASM_MACH_DESC_H
+#define __ASM_MACH_DESC_H
+
+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
+DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
+#define get_cpu_gdt_table(_cpu) per_cpu(cpu_gdt_table, _cpu)
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
+	"movw %%ax,2(%2)\n\t" \
+	"rorl $16,%%eax\n\t" \
+	"movb %%al,4(%2)\n\t" \
+	"movb %4,5(%2)\n\t" \
+	"movb $0,6(%2)\n\t" \
+	"movb %%ah,7(%2)\n\t" \
+	"rorl $16,%%eax" \
+	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+{
+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+}
+
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+	C(0); C(1); C(2);
+#undef C
+}
+
+static inline void clear_LDT(void)
+{
+	int cpu = get_cpu();
+
+	set_ldt_desc(cpu, &default_ldt[0], 5);
+	load_LDT_desc();
+	put_cpu();
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+{
+	void *segments = pc->ldt;
+	int count = pc->size;
+
+	if (likely(!count)) {
+		segments = &default_ldt[0];
+		count = 5;
+	}
+		
+	set_ldt_desc(cpu, segments, count);
+	load_LDT_desc();
+}
+
+#endif


* Re: [PATCH] 5/8 Move descriptor table management into the sub-arch layer
From: Zachary Amsden @ 2005-08-07 11:03 UTC
  To: Chris Wright; +Cc: linux-kernel, pratap, chrisl

Chris Wright wrote:

>* Zachary Amsden (zach@vmware.com) wrote:
>>This change encapsulates descriptor and task register management.
>
>These will need some merging together, will take a stab tomorrow.
>
>
>--- linux-2.6.12-xen0-arch.orig/include/asm-i386/desc.h
>+++ linux-2.6.12-xen0-arch/include/asm-i386/desc.h
>@@ -14,9 +14,6 @@
>
> static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
> {
>-	_set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
>+	_set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,

What is Xen doing for the GDT on SMP?  Does Xen have 16 pages of GDT per 
CPU?

>+++ linux-2.6.12-xen0-arch/include/asm-i386/mach-default/mach_desc.h
>@@ -0,0 +1,57 @@
>+#ifndef __ASM_MACH_DESC_H
>+#define __ASM_MACH_DESC_H
>+
>+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
>+DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
>+#define get_cpu_gdt_table(_cpu) per_cpu(cpu_gdt_table, _cpu)
>+
>+#define _set_tssldt_desc(n,addr,limit,type) \
>+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
>+	"movw %%ax,2(%2)\n\t" \
>+	"rorl $16,%%eax\n\t" \
>+	"movb %%al,4(%2)\n\t" \
>+	"movb %4,5(%2)\n\t" \
>+	"movb $0,6(%2)\n\t" \
>+	"movb %%ah,7(%2)\n\t" \
>+	"rorl $16,%%eax" \
>+	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))

This actually doesn't need to move into the sub-arch layer.  You can
redefine the call sites (set_ldt_desc / set_tss_desc) to operate on
stack (implicit register) values instead, and then notify the
hypervisor about GDT updates.  Of course, which way is cleaner is
still TBD.
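
A rough sketch of that alternative, just to make it concrete (the
single write_gdt_entry() hook below is a made-up name, not in either
patch):

static inline void __set_tss_desc(unsigned int cpu, unsigned int entry,
				  void *addr)
{
	struct desc_struct d;

	/* Build the 8-byte descriptor on the stack with the common code. */
	_set_tssldt_desc(&d, (int)addr,
		offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);

	/*
	 * One sub-arch hook: mach-default just copies d into the GDT,
	 * while a hypervisor sub-arch can trap or queue the update here.
	 */
	write_gdt_entry(get_cpu_gdt_table(cpu), entry, &d);
}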

>+static inline void clear_LDT(void)
>+{
>+	int cpu = get_cpu();
>+
>+	set_ldt_desc(cpu, &default_ldt[0], 5);
>+	load_LDT_desc();
>+	put_cpu();
>+}
>+
>+/*
>+ * load one particular LDT into the current CPU
>+ */
>+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
>+{
>+	void *segments = pc->ldt;
>+	int count = pc->size;
>+
>+	if (likely(!count)) {
>+		segments = &default_ldt[0];
>+		count = 5;
>+	}
>+		
>+	set_ldt_desc(cpu, segments, count);
>+	load_LDT_desc();
>+}
>+
>+#endif

These two don't actually need to move into the sub-arch layer; they
can call functions that have already moved.

So far it looks like we are pretty much on the same page, with mostly
cosmetic differences.

Zach


* Re: [PATCH] 5/8 Move descriptor table management into the sub-arch layer
From: Pavel Machek @ 2005-08-08 14:24 UTC
  To: Zachary Amsden
  Cc: akpm, chrisw, linux-kernel, davej, hpa, Riley, pratap, chrisl

Hi!


> i386 Transparent paravirtualization subarch patch #5
> 
> This change encapsulates descriptor and task register management.
> 
> Diffs against: 2.6.13-rc4-mm1
> 
> Signed-off-by: Zachary Amsden <zach@vmware.com>
> Index: linux-2.6.13/include/asm-i386/desc.h
> ===================================================================
> --- linux-2.6.13.orig/include/asm-i386/desc.h	2005-08-03 16:24:09.000000000 -0700
> +++ linux-2.6.13/include/asm-i386/desc.h	2005-08-03 16:31:40.000000000 -0700
> @@ -27,19 +27,6 @@
>  
>  extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
>  
> -#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
> -#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))

Isn't asm volatile (no underscores) enough?

> -#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
> -#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))

Eh, I think volatile should be either "volatile" or "__volatile__",
but not "__volatile" as you have it.
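
For reference, with the plain spellings those would read something
like (a sketch; same constraints, same behaviour):

#define load_gdt(dtr) asm volatile("lgdt %0"::"m" (*dtr))
#define load_idt(dtr) asm volatile("lidt %0"::"m" (*dtr))
#define load_tr(tr) asm volatile("ltr %0"::"mr" (tr))
#define load_ldt(ldt) asm volatile("lldt %0"::"mr" (ldt))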

-- 
if you have sharp zaurus hardware you don't need... you know my address

