/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Definitions for the clocksource provided by the Hyper-V
 * hypervisor to guest VMs, as described in the Hyper-V Top
 * Level Functional Spec (TLFS).
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author: Michael Kelley <mikelley@microsoft.com>
 */
|
|
|
|
|
|
|
|
#ifndef __CLKSOURCE_HYPERV_TIMER_H
|
|
|
|
#define __CLKSOURCE_HYPERV_TIMER_H
|
|
|
|
|
2019-07-01 12:26:06 +08:00
|
|
|
#include <linux/clocksource.h>
|
|
|
|
#include <linux/math64.h>
|
2022-11-14 05:21:15 +08:00
|
|
|
#include <asm/hyperv-tlfs.h>
|
2019-07-01 12:26:06 +08:00
|
|
|
|
2019-07-01 12:25:56 +08:00
|
|
|
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
|
|
|
|
#define HV_MIN_DELTA_TICKS 1
|
|
|
|
|
2021-07-14 08:01:46 +08:00
|
|
|
#ifdef CONFIG_HYPERV_TIMER
|
|
|
|
|
2022-11-14 05:21:15 +08:00
|
|
|
#include <asm/hyperv_timer.h>
|
|
|
|
|
2019-07-01 12:25:56 +08:00
|
|
|
/* Routines called by the VMbus driver */
|
2021-03-03 05:38:22 +08:00
|
|
|
extern int hv_stimer_alloc(bool have_percpu_irqs);
|
x86/hyperv: Initialize clockevents earlier in CPU onlining
Hyper-V has historically initialized stimer-based clockevents late in the
process of onlining a CPU because clockevents depend on stimer
interrupts. In the original Hyper-V design, stimer interrupts generate a
VMbus message, so the VMbus machinery must be running first, and VMbus
can't be initialized until relatively late. On x86/64, LAPIC timer based
clockevents are used during early initialization before VMbus and
stimer-based clockevents are ready, and again during CPU offlining after
the stimer clockevents have been shut down.
Unfortunately, this design creates problems when offlining CPUs for
hibernation or other purposes. stimer-based clockevents are shut down
relatively early in the offlining process, so clockevents_unbind_device()
must be used to fallback to the LAPIC-based clockevents for the remainder
of the offlining process. Furthermore, the late initialization and early
shutdown of stimer-based clockevents doesn't work well on ARM64 since there
is no other timer like the LAPIC to fallback to. So CPU onlining and
offlining doesn't work properly.
Fix this by recognizing that stimer Direct Mode is the normal path for
newer versions of Hyper-V on x86/64, and the only path on other
architectures. With stimer Direct Mode, stimer interrupts don't require any
VMbus machinery. stimer clockevents can be initialized and shut down
consistent with how it is done for other clockevent devices. While the old
VMbus-based stimer interrupts must still be supported for backward
compatibility on x86, that mode of operation can be treated as legacy.
So add a new Hyper-V stimer entry in the CPU hotplug state list, and use
that new state when in Direct Mode. Update the Hyper-V clocksource driver
to allocate and initialize stimer clockevents earlier during boot. Update
Hyper-V initialization and the VMbus driver to use this new design. As a
result, the LAPIC timer is no longer used during boot or CPU
onlining/offlining and clockevents_unbind_device() is not called. But
retain the old design as a legacy implementation for older versions of
Hyper-V that don't support Direct Mode.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Link: https://lkml.kernel.org/r/1573607467-9456-1-git-send-email-mikelley@microsoft.com
2019-11-13 09:11:49 +08:00
|
|
|
extern int hv_stimer_cleanup(unsigned int cpu);
|
|
|
|
extern void hv_stimer_legacy_init(unsigned int cpu, int sint);
|
|
|
|
extern void hv_stimer_legacy_cleanup(unsigned int cpu);
|
2019-07-01 12:25:56 +08:00
|
|
|
extern void hv_stimer_global_cleanup(void);
|
|
|
|
extern void hv_stimer0_isr(void);
|
|
|
|
|
2019-07-01 12:26:06 +08:00
|
|
|
extern void hv_init_clocksource(void);
|
2022-11-05 04:40:55 +08:00
|
|
|
extern void hv_remap_tsc_clocksource(void);
|
2019-07-01 12:26:06 +08:00
|
|
|
|
2022-11-04 01:58:59 +08:00
|
|
|
extern unsigned long hv_get_tsc_pfn(void);
|
2019-07-01 12:26:06 +08:00
|
|
|
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
|
|
|
|
|
2023-05-19 18:21:07 +08:00
|
|
|
static __always_inline bool
|
|
|
|
hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
|
|
|
|
u64 *cur_tsc, u64 *time)
|
2019-07-01 12:26:06 +08:00
|
|
|
{
|
|
|
|
u64 scale, offset;
|
|
|
|
u32 sequence;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The protocol for reading Hyper-V TSC page is specified in Hypervisor
|
|
|
|
* Top-Level Functional Specification ver. 3.0 and above. To get the
|
|
|
|
* reference time we must do the following:
|
|
|
|
* - READ ReferenceTscSequence
|
|
|
|
* A special '0' value indicates the time source is unreliable and we
|
|
|
|
* need to use something else. The currently published specification
|
|
|
|
* versions (up to 4.0b) contain a mistake and wrongly claim '-1'
|
|
|
|
* instead of '0' as the special value, see commit c35b82ef0294.
|
|
|
|
* - ReferenceTime =
|
|
|
|
* ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
|
|
|
|
* - READ ReferenceTscSequence again. In case its value has changed
|
|
|
|
* since our first reading we need to discard ReferenceTime and repeat
|
|
|
|
* the whole sequence as the hypervisor was updating the page in
|
|
|
|
* between.
|
|
|
|
*/
|
|
|
|
do {
|
|
|
|
sequence = READ_ONCE(tsc_pg->tsc_sequence);
|
|
|
|
if (!sequence)
|
2023-05-19 18:21:07 +08:00
|
|
|
return false;
|
2019-07-01 12:26:06 +08:00
|
|
|
/*
|
|
|
|
* Make sure we read sequence before we read other values from
|
|
|
|
* TSC page.
|
|
|
|
*/
|
|
|
|
smp_rmb();
|
|
|
|
|
|
|
|
scale = READ_ONCE(tsc_pg->tsc_scale);
|
|
|
|
offset = READ_ONCE(tsc_pg->tsc_offset);
|
|
|
|
*cur_tsc = hv_get_raw_timer();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure we read sequence after we read all other values
|
|
|
|
* from TSC page.
|
|
|
|
*/
|
|
|
|
smp_rmb();
|
|
|
|
|
|
|
|
} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);
|
|
|
|
|
2023-05-19 18:21:07 +08:00
|
|
|
*time = mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
|
|
|
|
return true;
|
2019-07-01 12:26:06 +08:00
|
|
|
}
|
|
|
|
|
2019-08-22 16:36:30 +08:00
|
|
|
#else /* CONFIG_HYPERV_TIMER */
|
2022-11-04 01:58:59 +08:00
|
|
|
static inline unsigned long hv_get_tsc_pfn(void)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-07-01 12:26:06 +08:00
|
|
|
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2023-05-19 18:21:07 +08:00
|
|
|
static __always_inline bool
|
|
|
|
hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc, u64 *time)
|
2019-07-01 12:26:06 +08:00
|
|
|
{
|
2023-05-19 18:21:07 +08:00
|
|
|
return false;
|
2019-07-01 12:26:06 +08:00
|
|
|
}
|
2021-07-14 08:01:46 +08:00
|
|
|
|
|
|
|
static inline int hv_stimer_cleanup(unsigned int cpu) { return 0; }
|
|
|
|
static inline void hv_stimer_legacy_init(unsigned int cpu, int sint) {}
|
|
|
|
static inline void hv_stimer_legacy_cleanup(unsigned int cpu) {}
|
|
|
|
static inline void hv_stimer_global_cleanup(void) {}
|
|
|
|
static inline void hv_stimer0_isr(void) {}
|
|
|
|
|
2019-08-22 16:36:30 +08:00
|
|
|
#endif /* CONFIG_HYPERV_TIMER */
|
2019-07-01 12:26:06 +08:00
|
|
|
|
2019-07-01 12:25:56 +08:00
|
|
|
#endif
|