x86/cpu: Remove segment load from switch_to_new_gdt()
On 32bit FS and on 64bit GS segments are already set up correctly, but load_percpu_segment() still sets [FG]S after switching from the early GDT to the direct GDT.

For 32bit the segment load has no side effects, but on 64bit it causes GSBASE to become 0, which means that any per CPU access before GSBASE is set to the new value is going to fault. That's the reason why the whole file containing this code has stackprotector removed.

But that's a pointless exercise for both 32 and 64 bit as the relevant segment selector is already correct. Loading the new GDT does not change that.

Remove the segment loads and add comments.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111143.097052006@infradead.org
This commit is contained in:
parent
9abf2313ad
commit
b5636d45aa
arch/x86
|
@ -670,7 +670,6 @@ extern struct desc_ptr early_gdt_descr;
|
|||
/* GDT switching and CPU bring-up entry points. */
extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);

extern void cpu_init(void);
extern void cpu_init_secondary(void);
extern void cpu_init_exception_handling(void);
|
||||
|
|
|
@ -701,16 +701,6 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
|
|||
/*
 * Capability bits to force-clear / force-set on every CPU.
 * Aligned to unsigned long — presumably so the arrays can be operated on
 * with word-sized bitops; confirm against the users of these arrays.
 */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
|
||||
|
||||
void load_percpu_segment(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_X86_32
|
||||
loadsegment(fs, __KERNEL_PERCPU);
|
||||
#else
|
||||
__loadsegment_simple(gs, 0);
|
||||
wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/* The 32-bit entry code needs to find cpu_entry_area. */
|
||||
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
|
||||
|
@ -738,16 +728,41 @@ void load_fixmap_gdt(int cpu)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(load_fixmap_gdt);
|
||||
|
||||
/*
|
||||
* Current gdt points %fs at the "master" per-cpu area: after this,
|
||||
* it's on the real one.
|
||||
/**
|
||||
* switch_to_new_gdt - Switch form early GDT to the direct one
|
||||
* @cpu: The CPU number for which this is invoked
|
||||
*
|
||||
* Invoked during early boot to switch from early GDT and early per CPU
|
||||
* (%fs on 32bit, GS_BASE on 64bit) to the direct GDT and the runtime per
|
||||
* CPU area.
|
||||
*/
|
||||
void switch_to_new_gdt(int cpu)
|
||||
{
|
||||
/* Load the original GDT */
|
||||
load_direct_gdt(cpu);
|
||||
/* Reload the per-cpu base */
|
||||
load_percpu_segment(cpu);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
* No need to load %gs. It is already correct.
|
||||
*
|
||||
* Writing %gs on 64bit would zero GSBASE which would make any per
|
||||
* CPU operation up to the point of the wrmsrl() fault.
|
||||
*
|
||||
* Set GSBASE to the new offset. Until the wrmsrl() happens the
|
||||
* early mapping is still valid. That means the GSBASE update will
|
||||
* lose any prior per CPU data which was not copied over in
|
||||
* setup_per_cpu_areas().
|
||||
*/
|
||||
wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
|
||||
#else
|
||||
/*
|
||||
* %fs is already set to __KERNEL_PERCPU, but after switching GDT
|
||||
* it is required to load FS again so that the 'hidden' part is
|
||||
* updated from the new GDT. Up to this point the early per CPU
|
||||
* translation is active. Any content of the early per CPU data
|
||||
* which was not copied over in setup_per_cpu_areas() is lost.
|
||||
*/
|
||||
loadsegment(fs, __KERNEL_PERCPU);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
 * Per-vendor cpu_dev table, indexed by X86_VENDOR_*; starts empty here —
 * presumably populated at boot by vendor registration code not visible in
 * this chunk.
 */
static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
|
||||
|
|
Loading…
Reference in New Issue