[PATCH] x86: fix broken SMP boot sequence
Recent GDT changes broke the SMP boot sequence if the booting CPU is
numbered anything other than zero.  There's also a subtle source of error
in that the boot-time CPU now uses cpu_gdt_table (which is actually the
GDT for booting CPUs in head.S).  This patch fixes both problems by
allocating the GDT descriptors themselves from a per_cpu area and
switching to them in cpu_init(), which means that cpu_gdt_table is once
again used exclusively for booting CPUs.

Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Matt Tolentino <metolent@snoqualmie.dp.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 2b932f6cf0 (parent 1e275d406b)
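The net effect is easiest to see in one place: each CPU's GDT descriptor becomes a per_cpu variable, and cpu_init() now allocates the GDT itself (from bootmem for the boot CPU, from the page allocator for secondaries) before loading it.  The following is only a condensed sketch of that flow under the patch, with the unchanged parts of cpu_init() and the usual i386 kernel headers assumed; it is not the literal patched function.

/* Condensed sketch, not the literal patched code: the per-CPU GDT
 * descriptor and the allocation/load sequence this patch introduces. */
DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);

void __devinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct desc_struct *gdt;
	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);

	/* The boot CPU runs this while only bootmem exists; secondary
	 * CPUs run it after bootmem has been torn down. */
	if (NODE_DATA(0)->bdata->node_bootmem_map) {
		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
		memset(gdt, 0, PAGE_SIZE);	/* alloc_bootmem_pages panics on failure */
	} else {
		gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
		/* error handling elided in this sketch */
	}

	/* Copy the boot GDT (cpu_gdt_table from head.S) into the private
	 * copy -- this step already existed -- then point this CPU's
	 * descriptor at it and switch over. */
	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
	cpu_gdt_descr->size = GDT_SIZE - 1;
	cpu_gdt_descr->address = (unsigned long)gdt;
	load_gdt(cpu_gdt_descr);

	/* ... remainder of cpu_init() unchanged ... */
}

Because the descriptor is a per_cpu variable rather than a static NR_CPUS array, the sequence works no matter which CPU number the boot CPU happens to get, and do_boot_cpu() no longer has to allocate GDT pages itself (see the smpboot.c hunk below).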
arch/i386/kernel/cpu/common.c

@@ -4,6 +4,7 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/bootmem.h>
 #include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
@@ -18,6 +19,9 @@
 #include "cpu.h"
 
+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
+
 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
 
@@ -571,8 +575,9 @@ void __devinit cpu_init(void)
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &current->thread;
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+	struct desc_struct *gdt;
 	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
@@ -589,6 +594,25 @@ void __devinit cpu_init(void)
 		set_in_cr4(X86_CR4_TSD);
 	}
 
+	/*
+	 * This is a horrible hack to allocate the GDT.  The problem
+	 * is that cpu_init() is called really early for the boot CPU
+	 * (and hence needs bootmem) but much later for the secondary
+	 * CPUs, when bootmem will have gone away
+	 */
+	if (NODE_DATA(0)->bdata->node_bootmem_map) {
+		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
+		/* alloc_bootmem_pages panics on failure, so no check */
+		memset(gdt, 0, PAGE_SIZE);
+	} else {
+		gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+		if (unlikely(!gdt)) {
+			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+			for (;;)
+				local_irq_enable();
+		}
+	}
+
 	/*
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
@@ -601,10 +625,10 @@ void __devinit cpu_init(void)
 		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
 		(CPU_16BIT_STACK_SIZE - 1);
 
-	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
-	cpu_gdt_descr[cpu].address = (unsigned long)gdt;
+	cpu_gdt_descr->size = GDT_SIZE - 1;
+	cpu_gdt_descr->address = (unsigned long)gdt;
 
-	load_gdt(&cpu_gdt_descr[cpu]);
+	load_gdt(cpu_gdt_descr);
 	load_idt(&idt_descr);
 
 	/*
arch/i386/kernel/efi.c

@@ -103,17 +103,19 @@ static void efi_call_phys_prelog(void)
 	 */
 	local_flush_tlb();
 
-	cpu_gdt_descr[0].address = __pa(cpu_gdt_descr[0].address);
-	load_gdt((struct Xgt_desc_struct *) __pa(&cpu_gdt_descr[0]));
+	per_cpu(cpu_gdt_descr, 0).address =
+		__pa(per_cpu(cpu_gdt_descr, 0).address);
+	load_gdt((struct Xgt_desc_struct *)__pa(&per_cpu(cpu_gdt_descr, 0)));
 }
 
 static void efi_call_phys_epilog(void)
 {
 	unsigned long cr4;
 
-	cpu_gdt_descr[0].address =
-		(unsigned long) __va(cpu_gdt_descr[0].address);
-	load_gdt(&cpu_gdt_descr[0]);
+	per_cpu(cpu_gdt_descr, 0).address =
+		(unsigned long)__va(per_cpu(cpu_gdt_descr, 0).address);
+	load_gdt((struct Xgt_desc_struct *)__va(&per_cpu(cpu_gdt_descr, 0)));
 
 	cr4 = read_cr4();
 
 	if (cr4 & X86_CR4_PSE) {
arch/i386/kernel/head.S

@@ -534,5 +534,3 @@ ENTRY(cpu_gdt_table)
 	.quad 0x0000000000000000	/* 0xf0 - unused */
 	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
 
-	/* Be sure this is zeroed to avoid false validations in Xen */
-	.fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0
arch/i386/kernel/i386_ksyms.c

@@ -3,8 +3,6 @@
 #include <asm/checksum.h>
 #include <asm/desc.h>
 
-EXPORT_SYMBOL_GPL(cpu_gdt_descr);
-
 EXPORT_SYMBOL(__down_failed);
 EXPORT_SYMBOL(__down_failed_interruptible);
 EXPORT_SYMBOL(__down_failed_trylock);
arch/i386/kernel/smpboot.c

@@ -898,12 +898,6 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
 	unsigned long start_eip;
 	unsigned short nmi_high = 0, nmi_low = 0;
 
-	if (!cpu_gdt_descr[cpu].address &&
-		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
-		printk("Failed to allocate GDT for CPU %d\n", cpu);
-		return 1;
-	}
-
 	++cpucount;
 
 	/*
include/asm-i386/desc.h

@@ -24,11 +24,13 @@ struct Xgt_desc_struct {
 	unsigned short pad;
 } __attribute__ ((packed));
 
-extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
+extern struct Xgt_desc_struct idt_descr;
+DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
+
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
-	return ((struct desc_struct *)cpu_gdt_descr[cpu].address);
+	return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 }
 
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))