2012-03-29 01:11:12 +08:00
|
|
|
#ifndef _ASM_X86_SWITCH_TO_H
|
|
|
|
#define _ASM_X86_SWITCH_TO_H
|
|
|
|
|
|
|
|
struct task_struct; /* one of the stranger aspects of C forward declarations */

/*
 * Low-level context-switch entry point.  Swaps register state and stacks
 * between 'prev' and 'next'; the frame it builds/consumes on the inactive
 * stack is struct inactive_task_frame (field order must match).  Returns
 * the task we switched away from.
 * NOTE(review): implementation is in arch assembly — not visible here.
 */
struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

/*
 * C-level part of the context switch, reached via __switch_to_asm().
 * Returns the previously-running task.
 */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);

struct tss_struct;

/*
 * Extra (slow-path) per-task state transfer performed on some switches;
 * see the definition for the exact conditions — not visible in this header.
 */
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
|
|
|
|
|
2016-08-11 17:35:23 +08:00
|
|
|
/* This runs on the previous thread's stack. */
|
|
|
|
static inline void prepare_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
	/* 'prev' is currently unused; kept for interface symmetry with switch_to(). */
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic.  Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed.  This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
|
|
|
|
|
2016-08-14 00:38:20 +08:00
|
|
|
/* Entry point at which a newly forked task first runs (asmlinkage ABI). */
asmlinkage void ret_from_fork(void);
|
|
|
|
|
2017-01-10 02:00:24 +08:00
|
|
|
/*
 * This is the structure pointed to by thread.sp for an inactive task. The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	/* Callee-saved GPRs preserved across the switch on 64-bit. */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	/* Callee-saved GPRs preserved across the switch on 32-bit. */
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;	/* where __switch_to_asm() returns to */
};
|
|
|
|
|
2016-08-14 00:38:19 +08:00
|
|
|
/*
 * Initial stack contents of a newly forked task: the switch frame consumed
 * by __switch_to_asm(), immediately followed by the child's user registers.
 * NOTE(review): presumably set up by copy_thread() — confirm against caller.
 */
struct fork_frame {
	struct inactive_task_frame frame;	/* must be first: thread.sp points here */
	struct pt_regs regs;
};
|
2012-03-29 01:11:12 +08:00
|
|
|
|
|
|
|
/*
 * Switch execution from task 'prev' to task 'next'.
 *
 * 'last' receives __switch_to_asm()'s return value: the task that was
 * running immediately before this CPU resumed 'prev' — which may not be
 * 'next', since arbitrary switches can happen while we are scheduled out.
 */
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(prev, next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
|
|
|
|
|
2017-11-02 15:59:09 +08:00
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
static inline void refresh_sysenter_cs(struct thread_struct *thread)
|
|
|
|
{
|
|
|
|
/* Only happens when SEP is enabled, no need to test "SEP"arately: */
|
|
|
|
if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
|
|
|
|
return;
|
|
|
|
|
|
|
|
this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
|
|
|
|
wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-03-29 01:11:12 +08:00
|
|
|
#endif /* _ASM_X86_SWITCH_TO_H */
|