/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H
arm64: Support PREEMPT_DYNAMIC
This patch enables support for PREEMPT_DYNAMIC on arm64, allowing the
preemption model to be chosen at boot time.
Specifically, this patch selects HAVE_PREEMPT_DYNAMIC_KEY, so that each
preemption function is an out-of-line call with an early return
depending upon a static key. This leaves almost all the codegen up to
the compiler, and side-steps a number of pain points with static calls
(e.g. interaction with CFI schemes). This should have no worse overhead
than using non-inline static calls, as those use out-of-line trampolines
with early returns.
For example, the dynamic_cond_resched() wrapper looks as follows when
enabled. When disabled, the first `B` is replaced with a `NOP`,
resulting in an early return.
| <dynamic_cond_resched>:
| bti c
| b <dynamic_cond_resched+0x10> // or `nop`
| mov w0, #0x0
| ret
| mrs x0, sp_el0
| ldr x0, [x0, #8]
| cbnz x0, <dynamic_cond_resched+0x8>
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
... compared to the regular form of the function:
| <__cond_resched>:
| bti c
| mrs x0, sp_el0
| ldr x1, [x0, #8]
| cbz x1, <__cond_resched+0x18>
| mov w0, #0x0
| ret
| paciasp
| stp x29, x30, [sp, #-16]!
| mov x29, sp
| bl <preempt_schedule_common>
| mov w0, #0x1
| ldp x29, x30, [sp], #16
| autiasp
| ret
Since arm64 does not yet use the generic entry code, we must define our
own `sk_dynamic_irqentry_exit_cond_resched`, which will be
enabled/disabled by the common code in kernel/sched/core.c. All other
preemption functions and associated static keys are defined there.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-8-mark.rutland@arm.com
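For reference, a rough C-level sketch of the wrapper described above: with HAVE_PREEMPT_DYNAMIC_KEY, kernel/sched/core.c defines each dynamic_*() function as an ordinary out-of-line function whose body is gated on a static key. The snippet below is an illustrative approximation of dynamic_cond_resched(), not a verbatim copy of the upstream definition.

/*
 * Illustrative approximation of the static-key-gated wrapper described
 * above; the authoritative definition lives in kernel/sched/core.c.
 */
DEFINE_STATIC_KEY_TRUE(sk_dynamic_cond_resched);

int __sched dynamic_cond_resched(void)
{
	/*
	 * When the key is disabled, the enabling branch is patched to a
	 * NOP and we fall through to the early "return 0" path seen in
	 * the disassembly above.
	 */
	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
		return 0;
	return __cond_resched();
}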
#include <linux/jump_label.h>
#include <linux/thread_info.h>
#define PREEMPT_NEED_RESCHED BIT(32)
#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
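The helpers below rely on the arm64 thread_info layout, in which the 64-bit preempt_count overlays a 32-bit count and a 32-bit need_resched word. The comment below is a simplified reminder of that arrangement (little-endian field order shown); the authoritative definition is in arch/arm64/include/asm/thread_info.h.

/*
 * Layout assumed by the helpers below (simplified, little-endian order):
 *
 *	union {
 *		u64	preempt_count;		// read/written as one 64-bit word
 *		struct {
 *			u32	count;		// bits 0-31: preemption depth
 *			u32	need_resched;	// bit 32 = PREEMPT_NEED_RESCHED
 *		} preempt;
 *	};
 *
 * The need_resched field is stored inverted: 0 means a reschedule is
 * needed. A 64-bit load of zero therefore means "count is zero and a
 * resched is pending", while PREEMPT_ENABLED (count zero, nothing
 * pending) reads back as BIT(32).
 */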
static inline int preempt_count(void)
{
	return READ_ONCE(current_thread_info()->preempt.count);
}

static inline void preempt_count_set(u64 pc)
{
	/* Preserve existing value of PREEMPT_NEED_RESCHED */
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)
sched/core: Initialize the idle task with preemption disabled
As pointed out by commit
de9b8f5dcbd9 ("sched: Fix crash trying to dequeue/enqueue the idle thread")
init_idle() can and will be invoked more than once on the same idle
task. At boot time, it is invoked for the boot CPU thread by
sched_init(). Then smp_init() creates the threads for all the secondary
CPUs and invokes init_idle() on them.
As the hotplug machinery brings the secondaries to life, it will issue
calls to idle_thread_get(), which itself invokes init_idle() yet again.
In this case it's invoked twice more per secondary: at _cpu_up(), and at
bringup_cpu().
Given smp_init() already initializes the idle tasks for all *possible*
CPUs, no further initialization should be required. Now, removing
init_idle() from idle_thread_get() exposes some interesting expectations
with regard to the idle task's preempt_count: the secondary startup always
issues a preempt_disable(), requiring some reset of the preempt count to 0
between hot-unplug and hotplug, which is currently served by
idle_thread_get() -> init_idle().
Given the idle task is supposed to have preemption disabled once and never
see it re-enabled, it seems that what we actually want is to initialize its
preempt_count to PREEMPT_DISABLED and leave it there. Do that, and remove
init_idle() from idle_thread_get().
Secondary startups were patched via coccinelle:
@begone@
@@
-preempt_disable();
...
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210512094636.2958515-1-valentin.schneider@arm.com

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
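With the treewide change described above, a secondary CPU's startup path no longer issues its own preempt_disable(): the idle task already carries PREEMPT_DISABLED from init_idle_preempt_count(). The function below is a deliberately simplified, illustrative shape of such a startup path; arm64's real implementation is secondary_start_kernel() in arch/arm64/kernel/smp.c and does considerably more work.

/*
 * Simplified illustration only; see arch/arm64/kernel/smp.c for the real
 * secondary_start_kernel().
 */
void secondary_start_kernel(void)
{
	/* ... MMU and per-CPU setup, notify the boot CPU, etc. ... */

	/*
	 * No explicit preempt_disable() here: the idle task's preempt_count
	 * was initialised to PREEMPT_DISABLED when it was created.
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}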
static inline void set_preempt_need_resched(void)
{
	current_thread_info()->preempt.need_resched = 0;
}

static inline void clear_preempt_need_resched(void)
{
	current_thread_info()->preempt.need_resched = 1;
}

static inline bool test_preempt_need_resched(void)
{
	return !current_thread_info()->preempt.need_resched;
}
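A short worked example of the inverted encoding these helpers implement, using the little-endian layout sketched earlier:

/*
 * Worked example of the encoding:
 *
 *	count	need_resched	64-bit preempt_count		state
 *	0	1		BIT(32) == PREEMPT_ENABLED	preemptible, nothing pending
 *	0	0		0				preemptible, resched pending
 *	2	1		BIT(32) + 2			preemption disabled (depth 2)
 *	2	0		2				disabled, resched pending
 */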
static inline void __preempt_count_add(int val)
{
	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
	pc += val;
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}

static inline void __preempt_count_sub(int val)
{
	u32 pc = READ_ONCE(current_thread_info()->preempt.count);
	pc -= val;
	WRITE_ONCE(current_thread_info()->preempt.count, pc);
}
static inline bool __preempt_count_dec_and_test(void)
{
	struct thread_info *ti = current_thread_info();
	u64 pc = READ_ONCE(ti->preempt_count);

	/* Update only the count field, leaving need_resched unchanged */
	WRITE_ONCE(ti->preempt.count, --pc);

	/*
	 * If we wrote back all zeroes, then we're preemptible and in
	 * need of a reschedule. Otherwise, we need to reload the
	 * preempt_count in case the need_resched flag was cleared by an
	 * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
	 * pair.
	 */
	return !pc || !READ_ONCE(ti->preempt_count);
}
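For context on how this helper is used: the generic preempt_enable() in include/linux/preempt.h decrements the count and, only when the whole 64-bit word has reached zero (count zero and a resched pending), drops into the scheduler via __preempt_schedule(). A simplified sketch of that caller follows; the real macro family also covers debug and notrace variants.

/* Simplified sketch of the generic caller in include/linux/preempt.h. */
#define preempt_enable()					\
do {								\
	barrier();						\
	if (unlikely(preempt_count_dec_and_test()))		\
		__preempt_schedule();				\
} while (0)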
static inline bool should_resched(int preempt_offset)
{
	u64 pc = READ_ONCE(current_thread_info()->preempt_count);

	/*
	 * The 64-bit read folds in the inverted need_resched flag, so this
	 * only matches when count == preempt_offset *and* a reschedule is
	 * pending.
	 */
	return pc == preempt_offset;
}
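should_resched(0) is the check behind the __cond_resched() disassembly quoted in the commit message above: the schedule path is taken only when the loaded 64-bit word is exactly zero. A simplified sketch of that caller, modelled on kernel/sched/core.c (the real function also does RCU quiescent-state reporting and depends on the preemption model):

/* Simplified sketch of the caller in kernel/sched/core.c. */
int __sched __cond_resched(void)
{
	if (should_resched(0)) {
		preempt_schedule_common();
		return 1;
	}
	return 0;
}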
#ifdef CONFIG_PREEMPTION
void preempt_schedule(void);
void preempt_schedule_notrace(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_preempt_schedule(void);
#define __preempt_schedule() dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
#else /* CONFIG_PREEMPT_DYNAMIC */
#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT_DYNAMIC */
#endif /* CONFIG_PREEMPTION */
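As the PREEMPT_DYNAMIC commit message above notes, the dynamic_*() wrappers declared in the CONFIG_PREEMPT_DYNAMIC block and their static keys (apart from sk_dynamic_irqentry_exit_cond_resched, which arm64 owns) are defined in kernel/sched/core.c, following the same pattern as the dynamic_cond_resched() sketch near the top of this file. An illustrative approximation of one of them:

/* Illustrative approximation of the definition in kernel/sched/core.c. */
DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);

void __sched notrace dynamic_preempt_schedule(void)
{
	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
		return;
	preempt_schedule();
}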
#endif /* __ASM_PREEMPT_H */