x86: Use CONFIG_PREEMPTION
CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality which today
depends on CONFIG_PREEMPT.

Switch the entry code, preempt and kprobes conditionals over to
CONFIG_PREEMPTION.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20190726212124.608488448@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9261660636
commit 48593975ae
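For context, a minimal sketch of the Kconfig relationship the changelog relies on (an illustrative, simplified rendering of the kernel/Kconfig.preempt entries, with dependencies, help text and unrelated selects omitted). Both preemption models select the common PREEMPTION symbol, which is why conditionals that must hold for PREEMPT as well as PREEMPT_RT are switched over to it:

    config PREEMPT
    	bool "Preemptible Kernel (Low-Latency Desktop)"
    	select PREEMPTION

    config PREEMPT_RT
    	bool "Fully Preemptible Kernel (Real-Time)"
    	select PREEMPTION

    config PREEMPTION
    	bool
    	select PREEMPT_COUNT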
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -63,7 +63,7 @@
  * enough to patch inline, increasing performance.
  */
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 # define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
 # define preempt_stop(clobbers)
@@ -1084,7 +1084,7 @@ restore_all:
 	INTERRUPT_RETURN
 
 restore_all_kernel:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	.Lno_preempt
@@ -1364,7 +1364,7 @@ ENTRY(xen_hypervisor_callback)
 ENTRY(xen_do_upcall)
 1:	mov	%esp, %eax
 	call	xen_evtchn_do_upcall
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 	call	xen_maybe_preempt_hcall
 #endif
 	jmp	ret_from_intr

--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -662,7 +662,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 
 	/* Returning to kernel space */
 retint_kernel:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	/* Interrupts are off */
 	/* Check if we need preemption */
 	btl	$9, EFLAGS(%rsp)	/* were interrupts off? */
@@ -1113,7 +1113,7 @@ ENTRY(xen_do_hypervisor_callback)	/* do_hypervisor_callback(struct *pt_regs) */
 	call	xen_evtchn_do_upcall
 	LEAVE_IRQ_STACK
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 	call	xen_maybe_preempt_hcall
 #endif
 	jmp	error_exit

--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -34,7 +34,7 @@
 	THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
 #endif
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
 	EXPORT_SYMBOL(___preempt_schedule)

--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -46,7 +46,7 @@
 	THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
 #endif
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
 	EXPORT_SYMBOL(___preempt_schedule)
@@ -55,7 +55,7 @@
 
 #if defined(CONFIG_TRACE_IRQFLAGS) \
  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
- || defined(CONFIG_PREEMPT)
+ || defined(CONFIG_PREEMPTION)
 .L_restore:
 	popq %r11
 	popq %r10

--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -102,7 +102,7 @@ static __always_inline bool should_resched(int preempt_offset)
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
   extern asmlinkage void ___preempt_schedule(void);
 # define __preempt_schedule() \
 	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)

--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -580,7 +580,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 	if (setup_detour_execution(p, regs, reenter))
 		return;
 
-#if !defined(CONFIG_PREEMPT)
+#if !defined(CONFIG_PREEMPTION)
 	if (p->ainsn.boostable && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		if (!reenter)