x86/cpu: Prepare TSS.IST setup for guard pages
Convert the TSS.IST setup code to use the cpu entry area information
directly instead of assuming a linear mapping of the IST stacks.

The store to orig_ist[] is no longer required as there are no users
anymore.

This is the last preparatory step towards IST guard pages.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190414160145.061686012@linutronix.de
This commit is contained in:
parent
afcd21dad8
commit
f6ef73224a
arch/x86/kernel/cpu/common.c

@@ -507,19 +507,6 @@ void load_percpu_segment(int cpu)
 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 #endif
 
-#ifdef CONFIG_X86_64
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
-	  [ESTACK_DB]				= DEBUG_STKSZ
-};
-#endif
-
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
 {
@@ -1690,17 +1677,14 @@ static void setup_getcpu(int cpu)
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
- * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
 
 void cpu_init(void)
 {
-	struct orig_ist *oist;
+	int cpu = raw_smp_processor_id();
 	struct task_struct *me;
 	struct tss_struct *t;
-	unsigned long v;
-	int cpu = raw_smp_processor_id();
 	int i;
 
 	wait_for_master_cpu(cpu);
@@ -1715,7 +1699,6 @@ void cpu_init(void)
 	load_ucode_ap();
 
 	t = &per_cpu(cpu_tss_rw, cpu);
-	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
 	if (this_cpu_read(numa_node) == 0 &&
@@ -1753,16 +1736,12 @@ void cpu_init(void)
 	/*
 	 * set up and load the per-CPU TSS
 	 */
-	if (!oist->ist[0]) {
-		char *estacks = (char *)&get_cpu_entry_area(cpu)->estacks;
-
-		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += exception_stack_sizes[v];
-			oist->ist[v] = t->x86_tss.ist[v] =
-					(unsigned long)estacks;
-			if (v == ESTACK_DB)
-				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
-		}
+	if (!t->x86_tss.ist[0]) {
+		t->x86_tss.ist[ESTACK_DF] = __this_cpu_ist_top_va(DF);
+		t->x86_tss.ist[ESTACK_NMI] = __this_cpu_ist_top_va(NMI);
+		t->x86_tss.ist[ESTACK_DB] = __this_cpu_ist_top_va(DB);
+		t->x86_tss.ist[ESTACK_MCE] = __this_cpu_ist_top_va(MCE);
+		per_cpu(debug_stack_addr, cpu) = t->x86_tss.ist[ESTACK_DB];
 	}
 
 	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
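For readers without the surrounding kernel context, below is a minimal standalone sketch of the difference the last hunk makes. All names here (struct cea_stacks, fill_ist_linear, fill_ist_direct, the stack and guard sizes) are hypothetical illustrations, not the kernel's actual types or macros; the point is only that once guard pages sit between the IST stacks, the stacks are no longer one contiguous block, so each TSS.IST entry has to be read directly from the entry-area layout instead of being computed by accumulating sizes.

#include <stdint.h>
#include <stdio.h>

#define EXCEPTION_STKSZ    4096
#define GUARD_SIZE         4096
#define N_EXCEPTION_STACKS 4

enum { ESTACK_DF, ESTACK_NMI, ESTACK_DB, ESTACK_MCE };

/* Hypothetical per-CPU entry-area layout: stacks separated by guard pages. */
struct cea_stacks {
	char df_stack[EXCEPTION_STKSZ];
	char df_guard[GUARD_SIZE];
	char nmi_stack[EXCEPTION_STKSZ];
	char nmi_guard[GUARD_SIZE];
	char db_stack[EXCEPTION_STKSZ];
	char db_guard[GUARD_SIZE];
	char mce_stack[EXCEPTION_STKSZ];
};

/*
 * Old scheme: assume the stacks form one linear block and derive each
 * stack top by accumulating sizes.  This breaks as soon as unmapped
 * guard pages are inserted between the stacks.
 */
void fill_ist_linear(uint64_t ist[N_EXCEPTION_STACKS], char *estacks)
{
	for (int v = 0; v < N_EXCEPTION_STACKS; v++) {
		estacks += EXCEPTION_STKSZ;
		ist[v] = (uint64_t)(uintptr_t)estacks;
	}
}

/* New scheme: take each stack top straight from the entry-area layout. */
void fill_ist_direct(uint64_t ist[N_EXCEPTION_STACKS], struct cea_stacks *cea)
{
	ist[ESTACK_DF]  = (uint64_t)(uintptr_t)(cea->df_stack  + EXCEPTION_STKSZ);
	ist[ESTACK_NMI] = (uint64_t)(uintptr_t)(cea->nmi_stack + EXCEPTION_STKSZ);
	ist[ESTACK_DB]  = (uint64_t)(uintptr_t)(cea->db_stack  + EXCEPTION_STKSZ);
	ist[ESTACK_MCE] = (uint64_t)(uintptr_t)(cea->mce_stack + EXCEPTION_STKSZ);
}

int main(void)
{
	struct cea_stacks cea;
	uint64_t ist[N_EXCEPTION_STACKS];

	fill_ist_direct(ist, &cea);
	printf("#DF stack top: %p\n", (void *)(uintptr_t)ist[ESTACK_DF]);
	return 0;
}

With the direct lookups, nothing in cpu_init() needs to know the total size or ordering of the stack block, which is what allows the follow-up change to place unmapped guard pages between the per-CPU exception stacks.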