sched/clock/x86: Mark sched_clock() noinstr
In order to use sched_clock() from noinstr code, mark it and all its
implementations noinstr.

The whole pvclock thing (used by KVM/Xen) is a bit of a pain, since it
calls out to watchdogs; create a pvclock_clocksource_read_nowd() variant
that doesn't do that and can be noinstr.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230126151323.702003578@infradead.org
commit 8739c68115 (parent 5c9da9fe82)
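The core of the change is the split in the pvclock code: one always-inlined
reader parameterized on whether it may touch the clocksource watchdog,
wrapped by an instrumentable entry point and a noinstr one. As a rough
standalone illustration of that pattern only — the macro stubs, the clock
source, and every function name below are placeholders, not the kernel's
definitions:

/*
 * Standalone sketch of the "nowd" split this patch uses; the stubs
 * below stand in for kernel infrastructure and are hypothetical.
 */
#define noinstr			/* kernel: noinstr section/attribute set */
#define __always_inline	inline __attribute__((__always_inline__))

typedef unsigned long long u64;

static void touch_watchdogs(void)	/* stand-in for pvclock_touch_watchdogs() */
{
}

static u64 read_raw_clock(void)		/* stand-in for the pvclock read loop */
{
	return 0;
}

/* Single implementation, always inlined into both wrappers below. */
static __always_inline u64 clock_read_common(int dowd)
{
	u64 ret = read_raw_clock();

	if (dowd)	/* constant-folds away in the nowd wrapper */
		touch_watchdogs();
	return ret;
}

/* Instrumentable variant: may call out to the watchdog code. */
u64 clock_read(void)
{
	return clock_read_common(1);
}

/* noinstr variant: after inlining, no instrumented calls remain. */
noinstr u64 clock_read_nowd(void)
{
	return clock_read_common(0);
}

Because the helper is __always_inline and the flag is a compile-time
constant at each call site, the watchdog branch disappears entirely from
the nowd variant, which is what lets the real
pvclock_clocksource_read_nowd() stay free of instrumented calls.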
@@ -8,7 +8,7 @@ extern struct clocksource kvm_clock;
 
 DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+static __always_inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
 {
 	return &this_cpu_read(hv_clock_per_cpu)->pvti;
 }
@@ -26,7 +26,7 @@ DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
 
 void paravirt_set_sched_clock(u64 (*func)(void));
 
-static inline u64 paravirt_sched_clock(void)
+static __always_inline u64 paravirt_sched_clock(void)
 {
 	return static_call(pv_sched_clock)();
 }
@@ -7,6 +7,7 @@
 
 /* some helper functions for xen and kvm pv clock sources */
 u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u64 pvclock_clocksource_read_nowd(struct pvclock_vcpu_time_info *src);
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
 void pvclock_set_flags(u8 flags);
 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
@@ -39,7 +40,7 @@ bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
  * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
  * yielding a 64-bit result.
  */
-static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
+static __always_inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 {
 	u64 product;
 #ifdef __i386__
@@ -143,7 +143,7 @@ static __init int parse_no_stealacc(char *arg)
 }
 early_param("no-steal-acc", parse_no_stealacc);
 
-static unsigned long long notrace vmware_sched_clock(void)
+static noinstr u64 vmware_sched_clock(void)
 {
 	unsigned long long ns;
 
@@ -71,12 +71,12 @@ static int kvm_set_wallclock(const struct timespec64 *now)
 	return -ENODEV;
 }
 
-static u64 kvm_clock_read(void)
+static noinstr u64 kvm_clock_read(void)
 {
 	u64 ret;
 
 	preempt_disable_notrace();
-	ret = pvclock_clocksource_read(this_cpu_pvti());
+	ret = pvclock_clocksource_read_nowd(this_cpu_pvti());
 	preempt_enable_notrace();
 	return ret;
 }
@@ -86,7 +86,7 @@ static u64 kvm_clock_get_cycles(struct clocksource *cs)
 	return kvm_clock_read();
 }
 
-static u64 kvm_sched_clock_read(void)
+static noinstr u64 kvm_sched_clock_read(void)
 {
 	return kvm_clock_read() - kvm_sched_clock_offset;
 }
@@ -64,7 +64,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 	return flags & valid_flags;
 }
 
-u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+static __always_inline
+u64 __pvclock_clocksource_read(struct pvclock_vcpu_time_info *src, bool dowd)
 {
 	unsigned version;
 	u64 ret;
@@ -77,7 +78,7 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 		flags = src->flags;
 	} while (pvclock_read_retry(src, version));
 
-	if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
+	if (dowd && unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
 		src->flags &= ~PVCLOCK_GUEST_STOPPED;
 		pvclock_touch_watchdogs();
 	}
@@ -100,15 +101,25 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 	 * updating at the same time, and one of them could be slightly behind,
 	 * making the assumption that last_value always go forward fail to hold.
 	 */
-	last = atomic64_read(&last_value);
+	last = arch_atomic64_read(&last_value);
 	do {
 		if (ret <= last)
 			return last;
-	} while (!atomic64_try_cmpxchg(&last_value, &last, ret));
+	} while (!arch_atomic64_try_cmpxchg(&last_value, &last, ret));
 
 	return ret;
 }
 
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+{
+	return __pvclock_clocksource_read(src, true);
+}
+
+noinstr u64 pvclock_clocksource_read_nowd(struct pvclock_vcpu_time_info *src)
+{
+	return __pvclock_clocksource_read(src, false);
+}
+
 void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
 			    struct pvclock_vcpu_time_info *vcpu_time,
 			    struct timespec64 *ts)
@@ -215,7 +215,7 @@ static void __init cyc2ns_init_secondary_cpus(void)
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-u64 native_sched_clock(void)
+noinstr u64 native_sched_clock(void)
 {
 	if (static_branch_likely(&__use_tsc)) {
 		u64 tsc_now = rdtsc();
@@ -248,7 +248,7 @@ u64 native_sched_clock_from_tsc(u64 tsc)
 /* We need to define a real function for sched_clock, to override the
    weak default version */
 #ifdef CONFIG_PARAVIRT
-unsigned long long sched_clock(void)
+noinstr u64 sched_clock(void)
 {
 	return paravirt_sched_clock();
 }
|
@ -258,8 +258,7 @@ bool using_native_sched_clock(void)
|
||||||
return static_call_query(pv_sched_clock) == native_sched_clock;
|
return static_call_query(pv_sched_clock) == native_sched_clock;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
unsigned long long
|
u64 sched_clock(void) __attribute__((alias("native_sched_clock")));
|
||||||
sched_clock(void) __attribute__((alias("native_sched_clock")));
|
|
||||||
|
|
||||||
bool using_native_sched_clock(void) { return true; }
|
bool using_native_sched_clock(void) { return true; }
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@@ -60,9 +60,17 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
 	return xen_clocksource_read();
 }
 
-static u64 xen_sched_clock(void)
+static noinstr u64 xen_sched_clock(void)
 {
-	return xen_clocksource_read() - xen_sched_clock_offset;
+	struct pvclock_vcpu_time_info *src;
+	u64 ret;
+
+	preempt_disable_notrace();
+	src = &__this_cpu_read(xen_vcpu)->time;
+	ret = pvclock_clocksource_read_nowd(src);
+	ret -= xen_sched_clock_offset;
+	preempt_enable_notrace();
+	return ret;
 }
 
 static void xen_read_wallclock(struct timespec64 *ts)
@@ -161,7 +161,7 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
 #ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 {
 	return (u64)(((unsigned __int128)a * mul) >> shift);
 }
@@ -177,7 +177,7 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
 #else
 
 #ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 {
 	u32 ah, al;
 	u64 ret;