x86/asm/entry: Remove INIT_TSS and fold the definitions into 'cpu_tss'
The INIT_TSS is unnecessary.  Just define the initial TSS where
'cpu_tss' is defined.  While we're at it, merge the 32-bit and 64-bit
definitions.  The only syntactic change is that 32-bit kernels were
computing sp0 as long, but now they compute it as unsigned long.

Verified by objdump: the contents and relocations of
.data..percpu..shared_aligned are unchanged on 32-bit and 64-bit
kernels.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/8fc39fa3f6c5d635e93afbdd1a0fe0678a6d7913.1425611534.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 24933b82c0
commit d0a0de21f8
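A minimal user-space sketch of why the sp0 change described above is
purely syntactic (illustrative only; 'init_stack' and its size here
are local stand-ins for the kernel symbol, not taken from the commit):

#include <stdio.h>

static char init_stack[8192];	/* stand-in for the kernel's init_stack */

int main(void)
{
	/* Old 32-bit form: size first, pointer cast to signed long. */
	unsigned long old_sp0 = sizeof(init_stack) + (long)&init_stack;
	/* New merged form: pointer cast to unsigned long, size added. */
	unsigned long new_sp0 = (unsigned long)&init_stack + sizeof(init_stack);

	/* On x86, both casts preserve the address bits and addition
	 * commutes, so the results are bit-for-bit identical: prints 1. */
	printf("%d\n", old_sp0 == new_sp0);
	return 0;
}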
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -818,22 +818,6 @@ static inline void spin_lock_prefetch(const void *x)
 	.io_bitmap_ptr		= NULL,				  \
 }
 
-/*
- * Note that the .io_bitmap member must be extra-big. This is because
- * the CPU will access an additional byte beyond the end of the IO
- * permission bitmap. The extra byte must be all 1 bits, and must
- * be within the limit.
- */
-#define INIT_TSS  {							  \
-	.x86_tss = {							  \
-		.sp0		= sizeof(init_stack) + (long)&init_stack, \
-		.ss0		= __KERNEL_DS,				  \
-		.ss1		= __KERNEL_CS,				  \
-		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
-	},								  \
-	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
-}
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
@@ -892,10 +876,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
-#define INIT_TSS  { \
-	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
 /*
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
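The 64-bit INIT_TSS removed above set only x86_tss.sp0; the merged
initializer in the next hunk leans on the same C rule, namely that
members omitted from a designated initializer are zero-initialized. A
small sketch of that rule (the struct below is a hypothetical cut-down
stand-in, not the real struct tss_struct):

struct tss_sketch {
	unsigned long	sp0;
	unsigned short	ss0, ss1;
	unsigned short	io_bitmap_base;
};

/* Only sp0 is named; ss0, ss1 and io_bitmap_base are guaranteed to be
 * zero, just as the CONFIG_X86_32-only fields are on a 64-bit build
 * of the merged cpu_tss initializer. */
static struct tss_sketch t = {
	.sp0 = 0x1000,
};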
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -37,7 +37,25 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = INIT_TSS;
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+	.x86_tss = {
+		.sp0 = (unsigned long)&init_stack + sizeof(init_stack),
+#ifdef CONFIG_X86_32
+		.ss0 = __KERNEL_DS,
+		.ss1 = __KERNEL_CS,
+		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
+#endif
+	},
+#ifdef CONFIG_X86_32
+	/*
+	 * Note that the .io_bitmap member must be extra-big. This is because
+	 * the CPU will access an additional byte beyond the end of the IO
+	 * permission bitmap. The extra byte must be all 1 bits, and must
+	 * be within the limit.
+	 */
+	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
+#endif
+};
 EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
 
 #ifdef CONFIG_X86_64
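The '[0 ... IO_BITMAP_LONGS] = ~0' line carried over above uses the
GNU C range-designator extension: it fills every element up to and
including index IO_BITMAP_LONGS, i.e. one word beyond the bitmap
proper, which is what makes the extra byte the CPU reads past the end
all 1 bits. A compilable sketch of the extension (the value of
IO_BITMAP_LONGS here is a made-up stand-in, not the kernel's):

#define IO_BITMAP_LONGS	32	/* hypothetical; the real value derives from IO_BITMAP_BITS */

/* One extra trailing word, all bits set, so any byte the CPU reads
 * just past the end of the bitmap reads as all 1s. */
static unsigned long io_bitmap[IO_BITMAP_LONGS + 1] = {
	[0 ... IO_BITMAP_LONGS] = ~0UL,
};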