sched/preempt: Refactor sched_dynamic_update()
Currently sched_dynamic_update needs to open-code the enabled/disabled
function names for each preemption model it supports, when in practice
this is a boolean enabled/disabled state for each function.

Make this clearer and avoid repetition by defining the enabled/disabled
states at the function definition, and using helper macros to perform the
static_call_update(). Where x86 currently overrides the enabled function,
it is made to provide both the enabled and disabled states for
consistency, with defaults provided by the core code otherwise.

In subsequent patches this will allow us to support PREEMPT_DYNAMIC
without static calls.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-3-mark.rutland@arm.com
commit 8a69fe0be1
parent 4c7485584d
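As a quick illustration of the naming convention and helper macros this patch introduces, here is a minimal, self-contained C sketch. It stands an ordinary function pointer in for the kernel's static-call machinery, and every identifier in it (do_preempt, do_preempt_target, ...) is invented for the example; only the _dynamic_enabled/_dynamic_disabled suffixes and the token-pasting enable/disable helpers mirror the pattern in the diff below.

/*
 * Minimal sketch of the pattern introduced by this patch, using an
 * ordinary function pointer in place of the kernel's static calls.
 * All identifiers here are invented for the example.
 */
#include <stddef.h>
#include <stdio.h>

static void do_preempt(void)
{
	printf("preemption point taken\n");
}

/* Each patchable function defines its enabled/disabled targets once. */
#define do_preempt_dynamic_enabled	do_preempt
#define do_preempt_dynamic_disabled	NULL

/* The update site then only says "enable" or "disable" for a function. */
#define preempt_dynamic_enable(f)	((f##_target) = f##_dynamic_enabled)
#define preempt_dynamic_disable(f)	((f##_target) = f##_dynamic_disabled)

/* Stand-in for DEFINE_STATIC_CALL(do_preempt, do_preempt_dynamic_enabled). */
static void (*do_preempt_target)(void) = do_preempt_dynamic_enabled;

int main(void)
{
	preempt_dynamic_disable(do_preempt);
	if (do_preempt_target)		/* NULL: the call is skipped */
		do_preempt_target();

	preempt_dynamic_enable(do_preempt);
	if (do_preempt_target)		/* points at do_preempt again */
		do_preempt_target();

	return 0;
}

Built with any C compiler this prints once: the first call is skipped because the disabled target is NULL, mirroring how a disabled static call becomes a no-op.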
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -108,16 +108,18 @@ static __always_inline bool should_resched(int preempt_offset)
 extern asmlinkage void preempt_schedule(void);
 extern asmlinkage void preempt_schedule_thunk(void);
 
-#define __preempt_schedule_func preempt_schedule_thunk
+#define preempt_schedule_dynamic_enabled	preempt_schedule_thunk
+#define preempt_schedule_dynamic_disabled	NULL
 
 extern asmlinkage void preempt_schedule_notrace(void);
 extern asmlinkage void preempt_schedule_notrace_thunk(void);
 
-#define __preempt_schedule_notrace_func preempt_schedule_notrace_thunk
+#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace_thunk
+#define preempt_schedule_notrace_dynamic_disabled	NULL
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
-DECLARE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
+DECLARE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
 
 #define __preempt_schedule() \
 do { \
@@ -125,7 +127,7 @@ do { \
 	asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
 } while (0)
 
-DECLARE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+DECLARE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
 
 #define __preempt_schedule_notrace() \
 do { \
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -456,6 +456,8 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
  */
 void irqentry_exit_cond_resched(void);
 #ifdef CONFIG_PREEMPT_DYNAMIC
+#define irqentry_exit_cond_resched_dynamic_enabled	irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled	NULL
 DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
 #endif
 
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6491,7 +6491,11 @@ NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
+#ifndef preempt_schedule_dynamic_enabled
+#define preempt_schedule_dynamic_enabled	preempt_schedule
+#define preempt_schedule_dynamic_disabled	NULL
+#endif
+DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
 #endif
 
@@ -6549,7 +6553,11 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+#ifndef preempt_schedule_notrace_dynamic_enabled
+#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
+#define preempt_schedule_notrace_dynamic_disabled	NULL
+#endif
+DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
 #endif
 
@@ -8060,9 +8068,13 @@ EXPORT_SYMBOL(__cond_resched);
 #endif
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
+#define cond_resched_dynamic_enabled	__cond_resched
+#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
 EXPORT_STATIC_CALL_TRAMP(cond_resched);
 
+#define might_resched_dynamic_enabled	__cond_resched
+#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
 EXPORT_STATIC_CALL_TRAMP(might_resched);
 #endif
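A note on the hunk above: cond_resched() and might_resched() return an int, so their "disabled" state cannot be a plain NULL target the way it is for the void preempt_schedule*() calls; instead it is ((void *)&__static_call_return0), a common helper that simply returns 0. With the helper macros introduced in the next hunk, for example:

	preempt_dynamic_disable(might_resched)
	  -> static_call_update(might_resched, might_resched_dynamic_disabled)
	  -> static_call_update(might_resched, ((void *)&__static_call_return0))

so a disabled might_resched() still returns 0, exactly as the old open-coded update did.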
@@ -8192,43 +8204,46 @@ int sched_dynamic_mode(const char *str)
 	return -EINVAL;
 }
 
+#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
+#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
+
 void sched_dynamic_update(int mode)
 {
 	/*
 	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
 	 * the ZERO state, which is invalid.
 	 */
-	static_call_update(cond_resched, __cond_resched);
-	static_call_update(might_resched, __cond_resched);
-	static_call_update(preempt_schedule, __preempt_schedule_func);
-	static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
-	static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+	preempt_dynamic_enable(cond_resched);
+	preempt_dynamic_enable(might_resched);
+	preempt_dynamic_enable(preempt_schedule);
+	preempt_dynamic_enable(preempt_schedule_notrace);
+	preempt_dynamic_enable(irqentry_exit_cond_resched);
 
 	switch (mode) {
 	case preempt_dynamic_none:
-		static_call_update(cond_resched, __cond_resched);
-		static_call_update(might_resched, (void *)&__static_call_return0);
-		static_call_update(preempt_schedule, NULL);
-		static_call_update(preempt_schedule_notrace, NULL);
-		static_call_update(irqentry_exit_cond_resched, NULL);
+		preempt_dynamic_enable(cond_resched);
+		preempt_dynamic_disable(might_resched);
+		preempt_dynamic_disable(preempt_schedule);
+		preempt_dynamic_disable(preempt_schedule_notrace);
+		preempt_dynamic_disable(irqentry_exit_cond_resched);
 		pr_info("Dynamic Preempt: none\n");
 		break;
 
 	case preempt_dynamic_voluntary:
-		static_call_update(cond_resched, __cond_resched);
-		static_call_update(might_resched, __cond_resched);
-		static_call_update(preempt_schedule, NULL);
-		static_call_update(preempt_schedule_notrace, NULL);
-		static_call_update(irqentry_exit_cond_resched, NULL);
+		preempt_dynamic_enable(cond_resched);
+		preempt_dynamic_enable(might_resched);
+		preempt_dynamic_disable(preempt_schedule);
+		preempt_dynamic_disable(preempt_schedule_notrace);
+		preempt_dynamic_disable(irqentry_exit_cond_resched);
 		pr_info("Dynamic Preempt: voluntary\n");
 		break;
 
 	case preempt_dynamic_full:
-		static_call_update(cond_resched, (void *)&__static_call_return0);
-		static_call_update(might_resched, (void *)&__static_call_return0);
-		static_call_update(preempt_schedule, __preempt_schedule_func);
-		static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
-		static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+		preempt_dynamic_disable(cond_resched);
+		preempt_dynamic_disable(might_resched);
+		preempt_dynamic_enable(preempt_schedule);
+		preempt_dynamic_enable(preempt_schedule_notrace);
+		preempt_dynamic_enable(irqentry_exit_cond_resched);
 		pr_info("Dynamic Preempt: full\n");
 		break;
 	}
 
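For reference, on x86 (which overrides the enabled/disabled defines in asm/preempt.h above) the new helpers expand via ## token pasting as:

	preempt_dynamic_enable(preempt_schedule)
	  -> static_call_update(preempt_schedule, preempt_schedule_dynamic_enabled)
	  -> static_call_update(preempt_schedule, preempt_schedule_thunk)

	preempt_dynamic_disable(preempt_schedule)
	  -> static_call_update(preempt_schedule, preempt_schedule_dynamic_disabled)
	  -> static_call_update(preempt_schedule, NULL)

A NULL target turns the void static call into a no-op, and architectures that do not provide their own defines fall back to the #ifndef defaults added next to each DEFINE_STATIC_CALL() in kernel/sched/core.c, so behaviour is unchanged relative to the previous open-coded updates.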