x86: Use CONFIG_PREEMPTION

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by
CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same
functionality, which today depends on CONFIG_PREEMPT.

Switch the entry code, preempt and kprobes conditionals over to
CONFIG_PREEMPTION.
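
For reference, the select relationship in kernel/Kconfig.preempt looks
roughly like this (a simplified sketch; the real entries carry extra
dependencies and selects omitted here):

    config PREEMPT
            bool "Preemptible Kernel (Low-Latency Desktop)"
            select PREEMPTION

    config PREEMPT_RT
            bool "Fully Preemptible Kernel (Real-Time)"
            select PREEMPTION

    config PREEMPTION
            bool
            select PREEMPT_COUNT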

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20190726212124.608488448@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9261660636
commit 48593975ae
 6 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -63,7 +63,7 @@
  * enough to patch inline, increasing performance.
  */
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 # define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
 # define preempt_stop(clobbers)
@@ -1084,7 +1084,7 @@ restore_all:
 	INTERRUPT_RETURN
 restore_all_kernel:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	.Lno_preempt
@@ -1364,7 +1364,7 @@ ENTRY(xen_hypervisor_callback)
 ENTRY(xen_do_upcall)
 1:	mov	%esp, %eax
 	call	xen_evtchn_do_upcall
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 	call	xen_maybe_preempt_hcall
 #endif
 	jmp	ret_from_intr

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -662,7 +662,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 /* Returning to kernel space */
 retint_kernel:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	/* Interrupts are off */
 	/* Check if we need preemption */
 	btl	$9, EFLAGS(%rsp)	/* were interrupts off? */
@@ -1113,7 +1113,7 @@ ENTRY(xen_do_hypervisor_callback)	/* do_hypervisor_callback(struct *pt_regs) */
 	call	xen_evtchn_do_upcall
 	LEAVE_IRQ_STACK
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 	call	xen_maybe_preempt_hcall
 #endif
 	jmp	error_exit

diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -34,7 +34,7 @@
 	THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
 #endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
 	EXPORT_SYMBOL(___preempt_schedule)

diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -46,7 +46,7 @@
 	THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
 #endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	THUNK ___preempt_schedule, preempt_schedule
 	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
 	EXPORT_SYMBOL(___preempt_schedule)
@@ -55,7 +55,7 @@
 #if defined(CONFIG_TRACE_IRQFLAGS) \
  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
- || defined(CONFIG_PREEMPT)
+ || defined(CONFIG_PREEMPTION)
 .L_restore:
 	popq %r11
 	popq %r10

diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -102,7 +102,7 @@ static __always_inline bool should_resched(int preempt_offset)
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
   extern asmlinkage void ___preempt_schedule(void);
 # define __preempt_schedule() \
 	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -580,7 +580,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 	if (setup_detour_execution(p, regs, reenter))
 		return;
-#if !defined(CONFIG_PREEMPT)
+#if !defined(CONFIG_PREEMPTION)
 	if (p->ainsn.boostable && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		if (!reenter)