Commit a939098a authored by Glauber Costa, committed by Ingo Molnar

x86: move x86_64 gdt closer to i386

i386 and x86_64 used two different schemes for maintaining the gdt.
With this patch, the x86_64 initial gdt table is defined in a .c file,
the same way i386's now is. We also call it "gdt_page", and the
descriptor "early_gdt_descr". This gives the two architectures common
naming, which can allow for more code integration.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 736f12bf
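Before this patch, x86_64 kept its boot GDT as an assembly table (cpu_gdt_table in head_64.S) plus an NR_CPUS-sized array of cpu_gdt_descr pseudo-descriptors, while i386 already used a page-aligned per-cpu struct gdt_page. After it, both architectures share the shape sketched below. This is a compilable simplification, not kernel code: desc_struct is reduced to a raw 8-byte entry, and GDT_ENTRIES is the 64-bit value of this era.

    #include <stdint.h>

    #define PAGE_SIZE   4096
    #define GDT_ENTRIES 16                /* x86_64 value; i386 uses 32 */

    struct desc_struct { uint64_t raw; }; /* simplified 8-byte descriptor */

    /* The shared per-CPU GDT container; the kernel declares one instance
     * per CPU with DECLARE_PER_CPU(struct gdt_page, gdt_page). */
    struct gdt_page {
            struct desc_struct gdt[GDT_ENTRIES];
    } __attribute__((aligned(PAGE_SIZE)));

The diff below renames the descriptor, points it at the per-cpu page, and deletes the assembly table.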
@@ -203,7 +203,7 @@ ENTRY(secondary_startup_64)
 	 * addresses where we're currently running on. We have to do that here
 	 * because in 32bit we couldn't load a 64bit linear address.
 	 */
-	lgdt	cpu_gdt_descr(%rip)
+	lgdt	early_gdt_descr(%rip)
 
 	/* set up data segments. actually 0 would do too */
 	movl	$__KERNEL_DS,%eax
@@ -391,54 +391,16 @@ NEXT_PAGE(level2_spare_pgt)
 
 	.data
 	.align 16
-	.globl cpu_gdt_descr
-cpu_gdt_descr:
-	.word	gdt_end-cpu_gdt_table-1
-gdt:
-	.quad	cpu_gdt_table
-#ifdef CONFIG_SMP
-	.rept	NR_CPUS-1
-	.word	0
-	.quad	0
-	.endr
-#endif
+	.globl early_gdt_descr
+early_gdt_descr:
+	.word	GDT_ENTRIES*8-1
+	.quad	per_cpu__gdt_page
 
 ENTRY(phys_base)
 	/* This must match the first entry in level2_kernel_pgt */
 	.quad   0x0000000000000000
 
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types  kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-
-	.section .data.page_aligned, "aw"
-	.align PAGE_SIZE
-
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-
-ENTRY(cpu_gdt_table)
-	.quad	0x0000000000000000	/* NULL descriptor */
-	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
-	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
-	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
-	.quad	0x00cffb000000ffff	/* __USER32_CS */
-	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
-	.quad	0x00affb000000ffff	/* __USER_CS */
-	.quad	0x0			/* unused */
-	.quad	0,0			/* TSS */
-	.quad	0,0			/* LDT */
-	.quad	0,0,0			/* three TLS descriptors */
-	.quad	0x0000f40000000000	/* node/CPU stored in limit */
-gdt_end:
-	/* asm/segment.h:GDT_ENTRIES must match this */
-	/* This should be a multiple of the cache line size */
-	/* GDTs of other CPUs are now dynamically allocated */
-
-	/* zero the remaining page */
-	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
-
 	.section .bss, "aw", @nobits
 	.align L1_CACHE_BYTES
 ENTRY(idt_table)
...
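The new early_gdt_descr label is nothing more than the in-memory operand of the lgdt instruction: a 16-bit limit (size in bytes minus one, hence GDT_ENTRIES*8-1) followed by a 64-bit base. The ".quad per_cpu__gdt_page" reference links because the per-cpu macros of this era emit a symbol named per_cpu__<name> for DEFINE_PER_CPU(type, name), and at this point the boot CPU still runs on the master per-cpu copy. A compilable mirror of the layout, simplified from the kernel's struct desc_ptr in asm/desc_defs.h:

    #include <stdint.h>

    /* The exact bytes that "lgdt early_gdt_descr(%rip)" reads. */
    struct desc_ptr {
            uint16_t size;     /* .word GDT_ENTRIES*8-1 : limit = bytes - 1 */
            uint64_t address;  /* .quad per_cpu__gdt_page : table base      */
    } __attribute__((packed));

    _Static_assert(sizeof(struct desc_ptr) == 10, "lgdt operand is 10 bytes");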
@@ -202,11 +202,8 @@ void __cpuinit cpu_init (void)
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
 	 */
-	if (cpu)
-		memcpy(get_cpu_gdt_table(cpu), cpu_gdt_table, GDT_SIZE);
-	cpu_gdt_descr[cpu].size = GDT_SIZE;
-	load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
+	switch_to_new_gdt();
 
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
...
@@ -81,8 +81,6 @@
 #define ARCH_SETUP
 #endif
 
-#include "cpu/cpu.h"
-
 /*
  * Machine setup..
  */
@@ -228,6 +226,23 @@ static inline void copy_edd(void)
 }
 #endif
 
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+void __attribute__((weak)) __init memory_setup(void)
+{
+	machine_specific_memory_setup();
+}
+
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+}
+
 /*
  * setup_arch - architecture-specific boot-time initializations
  *
...
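switch_to_new_gdt() replaces the old copy-and-load sequence in cpu_init(): each CPU's gdt_page is statically allocated per-cpu, so there is nothing left to memcpy, and the helper only repoints GDTR at the caller's own copy. As the comment notes, on 32-bit this is also the moment %fs-based per-cpu accesses stop hitting the boot ("master") per-cpu area and start hitting the CPU's real one. Note the limit convention: GDTR wants the table size in bytes minus one, hence GDT_SIZE - 1 here, where the removed cpu_init() code stored GDT_SIZE. A user-space rendition under stated assumptions (the stubs below are hypothetical stand-ins for get_cpu_gdt_table() and load_gdt(), and GDT_SIZE == GDT_ENTRIES * 8 as in asm/segment.h):

    #include <stdint.h>
    #include <stdio.h>

    #define GDT_ENTRIES 16
    #define GDT_SIZE    (GDT_ENTRIES * 8)

    struct desc_ptr { uint16_t size; uint64_t address; } __attribute__((packed));

    static uint64_t fake_gdt[GDT_ENTRIES];          /* this CPU's gdt_page */

    static uint64_t *get_cpu_gdt_table_stub(void)   /* kernel: get_cpu_gdt_table(cpu) */
    {
            return fake_gdt;
    }

    static void load_gdt_stub(const struct desc_ptr *p) /* kernel: lgdt via load_gdt() */
    {
            printf("lgdt base=%#llx limit=%u\n",
                   (unsigned long long)p->address, (unsigned)p->size);
    }

    static void switch_to_new_gdt_sketch(void)
    {
            struct desc_ptr gdt_descr;

            gdt_descr.address = (uint64_t)(uintptr_t)get_cpu_gdt_table_stub();
            gdt_descr.size    = GDT_SIZE - 1;  /* GDTR limit = table bytes - 1 */
            load_gdt_stub(&gdt_descr);
    }

    int main(void)
    {
            switch_to_new_gdt_sketch();
            return 0;
    }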
@@ -849,14 +849,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
 	INIT_WORK(&c_idle.work, do_fork_idle);
 
 #ifdef CONFIG_X86_64
-	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
-	if (!cpu_gdt_descr[cpu].address &&
-	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
-		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
-		return -1;
-	}
 
 	/* Allocate node local memory for AP pdas */
 	if (cpu > 0) {
 		boot_error = get_local_pda(cpu);
@@ -898,7 +892,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 #ifdef CONFIG_X86_32
 	per_cpu(current_task, cpu) = c_idle.idle;
 	init_gdt(cpu);
-	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	c_idle.idle->thread.ip = (unsigned long) start_secondary;
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	irq_ctx_init(cpu);
@@ -908,6 +901,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 	initial_code = (unsigned long)start_secondary;
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 #endif
+	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	stack_start.sp = (void *) c_idle.idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
@@ -1252,8 +1246,8 @@ void __init native_smp_prepare_boot_cpu(void)
 	int me = smp_processor_id();
 #ifdef CONFIG_X86_32
 	init_gdt(me);
-	switch_to_new_gdt();
 #endif
+	switch_to_new_gdt();
 	/* already set me in cpu_online_map in boot_cpu_init() */
 	cpu_set(me, cpu_callout_map);
 	per_cpu(cpu_state, me) = CPU_ONLINE;
...
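The smpboot.c changes drop the runtime allocation of secondary-CPU GDTs (get_zeroed_page() is no longer needed, since every CPU's gdt_page exists statically) and hoist the early_gdt_descr.address assignment and the switch_to_new_gdt() call out of the CONFIG_X86_32-only paths, so both architectures stage the AP's GDT pointer the same way before kicking it. A toy model of that staging (plain user-space C; the names mirror the diff but everything here is a stand-in):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS     4
    #define GDT_ENTRIES 16

    struct desc_ptr { uint16_t size; uint64_t address; } __attribute__((packed));

    static uint64_t gdt_page[NR_CPUS][GDT_ENTRIES]; /* stand-in for per-cpu gdt_page */
    static struct desc_ptr early_gdt_descr = { GDT_ENTRIES * 8 - 1, 0 };

    static void do_boot_cpu(int cpu)
    {
            /* Common to 32- and 64-bit after this patch: stage the AP's own
             * table; the AP's startup asm then runs "lgdt early_gdt_descr". */
            early_gdt_descr.address = (uint64_t)(uintptr_t)gdt_page[cpu];
            printf("cpu%d: GDT staged at %#llx\n", cpu,
                   (unsigned long long)early_gdt_descr.address);
    }

    int main(void)
    {
            for (int cpu = 1; cpu < NR_CPUS; cpu++)
                    do_boot_cpu(cpu);
            return 0;
    }

A single global early_gdt_descr is enough because secondary CPUs are brought up one at a time.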
@@ -53,8 +53,3 @@ EXPORT_SYMBOL(init_level4_pgt);
 EXPORT_SYMBOL(load_gs_index);
 
 EXPORT_SYMBOL(_proxy_pda);
-
-#ifdef CONFIG_PARAVIRT
-/* Virtualized guests may want to use it */
-EXPORT_SYMBOL_GPL(cpu_gdt_descr);
-#endif
@@ -29,11 +29,17 @@ static inline void fill_ldt(struct desc_struct *desc,
 extern struct desc_ptr idt_descr;
 extern gate_desc idt_table[];
 
+struct gdt_page {
+	struct desc_struct gdt[GDT_ENTRIES];
+} __attribute__((aligned(PAGE_SIZE)));
+DECLARE_PER_CPU(struct gdt_page, gdt_page);
+
+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+{
+	return per_cpu(gdt_page, cpu).gdt;
+}
+
 #ifdef CONFIG_X86_64
-extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-extern struct desc_ptr cpu_gdt_descr[];
-/* the cpu gdt accessor */
-#define get_cpu_gdt_table(x) ((struct desc_struct *)cpu_gdt_descr[x].address)
 
 static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
 			     unsigned dpl, unsigned ist, unsigned seg)
@@ -51,16 +57,6 @@ static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
 }
 
 #else
-struct gdt_page {
-	struct desc_struct gdt[GDT_ENTRIES];
-} __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
-
-static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
-{
-	return per_cpu(gdt_page, cpu).gdt;
-}
-
 static inline void pack_gate(gate_desc *gate, unsigned char type,
 			     unsigned long base, unsigned dpl, unsigned flags,
 			     unsigned short seg)
...
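With gdt_page and get_cpu_gdt_table() hoisted out of the 32-bit-only #else branch of asm/desc.h, every caller indexes the per-cpu GDT identically on both architectures. For instance, the TLS-loading path boils down to the following (a simplified sketch of native_load_tls(); thread_struct is reduced to its TLS slice, and GDT_ENTRY_TLS_MIN is the x86_64 value of the era, with i386 using 6):

    #include <stdint.h>

    #define GDT_ENTRY_TLS_MIN     12      /* x86_64 value; i386 uses 6 */
    #define GDT_ENTRY_TLS_ENTRIES 3

    struct desc_struct { uint64_t raw; }; /* simplified 8-byte descriptor */

    struct thread_struct_sketch {
            struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
    };

    /* Copy the thread's TLS descriptors into this CPU's slice of the
     * per-cpu GDT; gdt is what get_cpu_gdt_table(cpu) would return. */
    static void load_tls_sketch(struct thread_struct_sketch *t,
                                struct desc_struct *gdt)
    {
            for (unsigned int i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
                    gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
    }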
@@ -61,18 +61,14 @@
 #define GDT_ENTRY_TLS_MAX 		(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
 
 #define GDT_ENTRY_DEFAULT_USER_CS	14
-#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
 
 #define GDT_ENTRY_DEFAULT_USER_DS	15
-#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
 
 #define GDT_ENTRY_KERNEL_BASE	12
 
 #define GDT_ENTRY_KERNEL_CS		(GDT_ENTRY_KERNEL_BASE + 0)
-#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
 
 #define GDT_ENTRY_KERNEL_DS		(GDT_ENTRY_KERNEL_BASE + 1)
-#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
 
 #define GDT_ENTRY_TSS			(GDT_ENTRY_KERNEL_BASE + 4)
 #define GDT_ENTRY_LDT			(GDT_ENTRY_KERNEL_BASE + 5)
@@ -139,10 +135,11 @@
 #else
 #include <asm/cache.h>
 
-#define __KERNEL_CS	0x10
-#define __KERNEL_DS	0x18
+#define GDT_ENTRY_KERNEL32_CS 1
+#define GDT_ENTRY_KERNEL_CS 2
+#define GDT_ENTRY_KERNEL_DS 3
 
-#define __KERNEL32_CS   0x08
+#define __KERNEL32_CS   (GDT_ENTRY_KERNEL32_CS * 8)
 
 /*
  * we cannot use the same code segment descriptor for user and kernel
@@ -150,10 +147,10 @@
  * The segment offset needs to contain a RPL. Grr. -AK
  * GDT layout to get 64bit syscall right (sysret hardcodes gdt offsets)
  */
-
-#define __USER32_CS   0x23   /* 4*8+3 */
-#define __USER_DS     0x2b   /* 5*8+3 */
-#define __USER_CS     0x33   /* 6*8+3 */
+#define GDT_ENTRY_DEFAULT_USER32_CS 4
+#define GDT_ENTRY_DEFAULT_USER_DS 5
+#define GDT_ENTRY_DEFAULT_USER_CS 6
+#define __USER32_CS   (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
 #define __USER32_DS	__USER_DS
 
 #define GDT_ENTRY_TSS 8	/* needs two entries */
@@ -175,6 +172,10 @@
 #endif
 
+#define __KERNEL_CS	(GDT_ENTRY_KERNEL_CS * 8)
+#define __KERNEL_DS	(GDT_ENTRY_KERNEL_DS * 8)
+#define __USER_DS	(GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
+#define __USER_CS	(GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
+
 #ifndef CONFIG_PARAVIRT
 #define get_kernel_rpl()  0
 #endif
...
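The 64-bit half of segment.h now derives every selector from its GDT index instead of hardcoding the value, and the arithmetic lands exactly on the old magic numbers: index * 8, plus RPL 3 for the user-mode selectors. A compile-time check of that equivalence (plain C11, using the constants from the hunk above):

    /* The pre-patch selector values, recomputed from the new
     * index-based definitions. */
    #define GDT_ENTRY_KERNEL32_CS        1
    #define GDT_ENTRY_KERNEL_CS          2
    #define GDT_ENTRY_KERNEL_DS          3
    #define GDT_ENTRY_DEFAULT_USER32_CS  4
    #define GDT_ENTRY_DEFAULT_USER_DS    5
    #define GDT_ENTRY_DEFAULT_USER_CS    6

    _Static_assert(GDT_ENTRY_KERNEL32_CS * 8           == 0x08, "__KERNEL32_CS");
    _Static_assert(GDT_ENTRY_KERNEL_CS * 8             == 0x10, "__KERNEL_CS");
    _Static_assert(GDT_ENTRY_KERNEL_DS * 8             == 0x18, "__KERNEL_DS");
    _Static_assert(GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3 == 0x23, "__USER32_CS");
    _Static_assert(GDT_ENTRY_DEFAULT_USER_DS * 8 + 3   == 0x2b, "__USER_DS");
    _Static_assert(GDT_ENTRY_DEFAULT_USER_CS * 8 + 3   == 0x33, "__USER_CS");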