sched/core: Stop setting PREEMPT_ACTIVE
Now that nothing tests for PREEMPT_ACTIVE anymore, stop setting it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c73464b1c8
commit 3d8f74dd4c
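Background for the one-line changelog above: PREEMPT_ACTIVE was a dedicated high bit in the preempt count that __schedule() used to recognise that it was entered as a preemption rather than via a voluntary schedule() call. Since __schedule() now receives that information as an explicit argument (note the unchanged __schedule(true) calls in the context lines of the diff below), setting the bit carries no information, and the preempt_active_enter()/preempt_active_exit() wrappers collapse into ordinary preempt-count operations. A minimal sketch of the shape all three rewritten call sites share after this patch (illustrative, not verbatim kernel code):

	do {
		preempt_disable();			/* or the _notrace variant */
		__schedule(true);			/* true: entered as a preemption */
		sched_preempt_enable_no_resched();	/* or the _notrace variant */
	} while (need_resched());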
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -146,18 +146,6 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
-#define preempt_active_enter() \
-do { \
-	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-	barrier(); \
-} while (0)
-
-#define preempt_active_exit() \
-do { \
-	barrier(); \
-	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-} while (0)
-
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3201,9 +3201,9 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
-		preempt_active_enter();
+		preempt_disable();
 		__schedule(true);
-		preempt_active_exit();
+		sched_preempt_enable_no_resched();
 
 		/*
 		 * Check again in case we missed a preemption opportunity
@@ -3254,13 +3254,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 		return;
 
 	do {
-		/*
-		 * Use raw __prempt_count() ops that don't call function.
-		 * We can't call functions before disabling preemption which
-		 * disarm preemption tracing recursions.
-		 */
-		__preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
-		barrier();
+		preempt_disable_notrace();
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -3270,8 +3264,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 		__schedule(true);
 		exception_exit(prev_ctx);
 
-		barrier();
-		__preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+		preempt_enable_no_resched_notrace();
 	} while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
@@ -3294,11 +3287,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		preempt_active_enter();
+		preempt_disable();
 		local_irq_enable();
 		__schedule(true);
 		local_irq_disable();
-		preempt_active_exit();
+		sched_preempt_enable_no_resched();
 	} while (need_resched());
 
 	exception_exit(prev_state);
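A note on the exit side (my reading, not from the changelog): the loops deliberately use the *_no_resched() enable variants. A plain preempt_enable() would itself test need_resched() and could schedule again from inside the loop, while the do/while condition already re-checks need_resched(), so the cheaper form suffices. The notrace path also keeps the tracing-recursion guarantee described in the deleted comment: assuming the include/linux/preempt.h definitions contemporary with this commit, the helpers it switches to expand to the same raw operations the open-coded sequence used, roughly:

	/* Rough expansion, assuming the contemporary preempt.h definitions: */
	#define preempt_disable_notrace() \
	do { \
		__preempt_count_inc(); \
		barrier(); \
	} while (0)

	#define preempt_enable_no_resched_notrace() \
	do { \
		barrier(); \
		__preempt_count_dec(); \
	} while (0)

so no traceable function is called while the preempt count is being manipulated.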