sched/preempt: Move PREEMPT_DYNAMIC logic later

The PREEMPT_DYNAMIC logic in kernel/sched/core.c patches static calls
for a bunch of preemption functions. While most are defined prior to
this, the definition of cond_resched() is later in the file, and so we
only have its declarations from include/linux/sched.h.
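
For reference, the split looks roughly like this (a trimmed sketch; the exact DEFINE_STATIC_CALL variant and the body of __cond_resched() vary by kernel version):

    /* include/linux/sched.h -- all that core.c can see early in the file: */
    DECLARE_STATIC_CALL(cond_resched, __cond_resched);

    /* kernel/sched/core.c, well below the current PREEMPT_DYNAMIC block: */
    int __sched __cond_resched(void)
    {
            if (should_resched(0)) {
                    preempt_schedule_common();
                    return 1;
            }
            return 0;
    }
    DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);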

In subsequent patches we'd like to define some macros alongside the
definition of each of the preemption functions, which we can use within
sched_dynamic_update(). For this to be possible, the PREEMPT_DYNAMIC
logic needs to be placed after the various preemption functions.
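
As a rough illustration (hypothetical names; the actual macros are introduced by the subsequent patches, not here), the intent is to pair each function definition with its enabled/disabled static-call targets so that sched_dynamic_update() can switch modes uniformly:

    /* defined alongside __cond_resched() itself: */
    #define cond_resched_dynamic_enabled    __cond_resched
    #define cond_resched_dynamic_disabled   ((void *)&__static_call_return0)

    /* used from sched_dynamic_update(): */
    #define preempt_dynamic_enable(f)       static_call_update(f, f##_dynamic_enabled)
    #define preempt_dynamic_disable(f)      static_call_update(f, f##_dynamic_disabled)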

As a preparatory step, this patch moves the PREEMPT_DYNAMIC logic after
the various preemption functions, with no other changes -- this is
purely a move.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-2-mark.rutland@arm.com
Mark Rutland 2022-02-14 16:52:10 +00:00 committed by Peter Zijlstra
parent ed3b362d54
commit 4c7485584d
1 changed file with 136 additions and 136 deletions

@@ -6555,142 +6555,6 @@ EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
#endif /* CONFIG_PREEMPTION */
#ifdef CONFIG_PREEMPT_DYNAMIC

#include <linux/entry-common.h>

/*
 * SC:cond_resched
 * SC:might_resched
 * SC:preempt_schedule
 * SC:preempt_schedule_notrace
 * SC:irqentry_exit_cond_resched
 *
 *
 * NONE:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- RET0
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *
 * VOLUNTARY:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- __cond_resched
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *
 * FULL:
 *   cond_resched               <- RET0
 *   might_resched              <- RET0
 *   preempt_schedule           <- preempt_schedule
 *   preempt_schedule_notrace   <- preempt_schedule_notrace
 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
 */

enum {
        preempt_dynamic_undefined = -1,
        preempt_dynamic_none,
        preempt_dynamic_voluntary,
        preempt_dynamic_full,
};

int preempt_dynamic_mode = preempt_dynamic_undefined;

int sched_dynamic_mode(const char *str)
{
        if (!strcmp(str, "none"))
                return preempt_dynamic_none;

        if (!strcmp(str, "voluntary"))
                return preempt_dynamic_voluntary;

        if (!strcmp(str, "full"))
                return preempt_dynamic_full;

        return -EINVAL;
}

void sched_dynamic_update(int mode)
{
        /*
         * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
         * the ZERO state, which is invalid.
         */
        static_call_update(cond_resched, __cond_resched);
        static_call_update(might_resched, __cond_resched);
        static_call_update(preempt_schedule, __preempt_schedule_func);
        static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
        static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);

        switch (mode) {
        case preempt_dynamic_none:
                static_call_update(cond_resched, __cond_resched);
                static_call_update(might_resched, (void *)&__static_call_return0);
                static_call_update(preempt_schedule, NULL);
                static_call_update(preempt_schedule_notrace, NULL);
                static_call_update(irqentry_exit_cond_resched, NULL);
                pr_info("Dynamic Preempt: none\n");
                break;

        case preempt_dynamic_voluntary:
                static_call_update(cond_resched, __cond_resched);
                static_call_update(might_resched, __cond_resched);
                static_call_update(preempt_schedule, NULL);
                static_call_update(preempt_schedule_notrace, NULL);
                static_call_update(irqentry_exit_cond_resched, NULL);
                pr_info("Dynamic Preempt: voluntary\n");
                break;

        case preempt_dynamic_full:
                static_call_update(cond_resched, (void *)&__static_call_return0);
                static_call_update(might_resched, (void *)&__static_call_return0);
                static_call_update(preempt_schedule, __preempt_schedule_func);
                static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
                static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
                pr_info("Dynamic Preempt: full\n");
                break;
        }

        preempt_dynamic_mode = mode;
}

static int __init setup_preempt_mode(char *str)
{
        int mode = sched_dynamic_mode(str);
        if (mode < 0) {
                pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
                return 0;
        }

        sched_dynamic_update(mode);
        return 1;
}
__setup("preempt=", setup_preempt_mode);

static void __init preempt_dynamic_init(void)
{
        if (preempt_dynamic_mode == preempt_dynamic_undefined) {
                if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
                        sched_dynamic_update(preempt_dynamic_none);
                } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
                        sched_dynamic_update(preempt_dynamic_voluntary);
                } else {
                        /* Default static call setting, nothing to do */
                        WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
                        preempt_dynamic_mode = preempt_dynamic_full;
                        pr_info("Dynamic Preempt: full\n");
                }
        }
}

#else /* !CONFIG_PREEMPT_DYNAMIC */

static inline void preempt_dynamic_init(void) { }

#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
/*
 * This is the entry point to schedule() from kernel preemption
 * off of irq context.
@@ -8271,6 +8135,142 @@ int __cond_resched_rwlock_write(rwlock_t *lock)
}
EXPORT_SYMBOL(__cond_resched_rwlock_write);
#ifdef CONFIG_PREEMPT_DYNAMIC

#include <linux/entry-common.h>

/*
 * SC:cond_resched
 * SC:might_resched
 * SC:preempt_schedule
 * SC:preempt_schedule_notrace
 * SC:irqentry_exit_cond_resched
 *
 *
 * NONE:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- RET0
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *
 * VOLUNTARY:
 *   cond_resched               <- __cond_resched
 *   might_resched              <- __cond_resched
 *   preempt_schedule           <- NOP
 *   preempt_schedule_notrace   <- NOP
 *   irqentry_exit_cond_resched <- NOP
 *
 * FULL:
 *   cond_resched               <- RET0
 *   might_resched              <- RET0
 *   preempt_schedule           <- preempt_schedule
 *   preempt_schedule_notrace   <- preempt_schedule_notrace
 *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
 */

enum {
        preempt_dynamic_undefined = -1,
        preempt_dynamic_none,
        preempt_dynamic_voluntary,
        preempt_dynamic_full,
};

int preempt_dynamic_mode = preempt_dynamic_undefined;

int sched_dynamic_mode(const char *str)
{
        if (!strcmp(str, "none"))
                return preempt_dynamic_none;

        if (!strcmp(str, "voluntary"))
                return preempt_dynamic_voluntary;

        if (!strcmp(str, "full"))
                return preempt_dynamic_full;

        return -EINVAL;
}

void sched_dynamic_update(int mode)
{
        /*
         * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
         * the ZERO state, which is invalid.
         */
        static_call_update(cond_resched, __cond_resched);
        static_call_update(might_resched, __cond_resched);
        static_call_update(preempt_schedule, __preempt_schedule_func);
        static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
        static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);

        switch (mode) {
        case preempt_dynamic_none:
                static_call_update(cond_resched, __cond_resched);
                static_call_update(might_resched, (void *)&__static_call_return0);
                static_call_update(preempt_schedule, NULL);
                static_call_update(preempt_schedule_notrace, NULL);
                static_call_update(irqentry_exit_cond_resched, NULL);
                pr_info("Dynamic Preempt: none\n");
                break;

        case preempt_dynamic_voluntary:
                static_call_update(cond_resched, __cond_resched);
                static_call_update(might_resched, __cond_resched);
                static_call_update(preempt_schedule, NULL);
                static_call_update(preempt_schedule_notrace, NULL);
                static_call_update(irqentry_exit_cond_resched, NULL);
                pr_info("Dynamic Preempt: voluntary\n");
                break;

        case preempt_dynamic_full:
                static_call_update(cond_resched, (void *)&__static_call_return0);
                static_call_update(might_resched, (void *)&__static_call_return0);
                static_call_update(preempt_schedule, __preempt_schedule_func);
                static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
                static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
                pr_info("Dynamic Preempt: full\n");
                break;
        }

        preempt_dynamic_mode = mode;
}

static int __init setup_preempt_mode(char *str)
{
        int mode = sched_dynamic_mode(str);
        if (mode < 0) {
                pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
                return 0;
        }

        sched_dynamic_update(mode);
        return 1;
}
__setup("preempt=", setup_preempt_mode);

static void __init preempt_dynamic_init(void)
{
        if (preempt_dynamic_mode == preempt_dynamic_undefined) {
                if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
                        sched_dynamic_update(preempt_dynamic_none);
                } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
                        sched_dynamic_update(preempt_dynamic_voluntary);
                } else {
                        /* Default static call setting, nothing to do */
                        WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
                        preempt_dynamic_mode = preempt_dynamic_full;
                        pr_info("Dynamic Preempt: full\n");
                }
        }
}

#else /* !CONFIG_PREEMPT_DYNAMIC */

static inline void preempt_dynamic_init(void) { }

#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
/**
 * yield - yield the current processor to other threads.
 *
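
For context: with this block in place, the preemption model driven by sched_dynamic_update() is selected at boot via the preempt= parameter registered by setup_preempt_mode() above, e.g.:

    preempt=none
    preempt=voluntary
    preempt=full

On kernels that expose the scheduler debugfs interface, the mode can typically also be switched at runtime through /sys/kernel/debug/sched/preempt (assuming debugfs is mounted at the usual path).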