Merge branch 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 timer updates from Thomas Gleixner:
 "A small set of updates for x86 specific timers:

   - Mark TSC invariant on a subset of Centaur CPUs

   - Allow TSC calibration without PIT on mobile platforms which lack
     legacy devices"

* 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/centaur: Mark TSC invariant
  x86/tsc: Introduce early tsc clocksource
  x86/time: Unconditionally register legacy timer interrupt
  x86/tsc: Allow TSC calibration without PIT
This commit is contained in:
Linus Torvalds 2018-01-29 18:54:56 -08:00
commit 36c289e72a
5 changed files with 67 additions and 13 deletions

View File

@@ -69,6 +69,11 @@ struct legacy_pic {
 extern struct legacy_pic *legacy_pic;
 extern struct legacy_pic null_legacy_pic;
 
+static inline bool has_legacy_pic(void)
+{
+	return legacy_pic != &null_legacy_pic;
+}
+
 static inline int nr_legacy_irqs(void)
 {
 	return legacy_pic->nr_legacy_irqs;

View File

@@ -106,6 +106,10 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
+	if (c->x86_power & (1 << 8)) {
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+	}
 }
 
 static void init_centaur(struct cpuinfo_x86 *c)

View File

@@ -69,9 +69,12 @@ static struct irqaction irq0 = {
 
 static void __init setup_default_timer_irq(void)
 {
-	if (!nr_legacy_irqs())
-		return;
-	setup_irq(0, &irq0);
+	/*
+	 * Unconditionally register the legacy timer; even without legacy
+	 * PIC/PIT we need this for the HPET0 in legacy replacement mode.
+	 */
+	if (setup_irq(0, &irq0))
+		pr_info("Failed to register legacy timer interrupt\n");
 }
 
 /* Default timer init function */

View File

@@ -25,6 +25,7 @@
 #include <asm/geode.h>
 #include <asm/apic.h>
 #include <asm/intel-family.h>
+#include <asm/i8259.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -363,6 +364,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
 	unsigned long tscmin, tscmax;
 	int pitcnt;
 
+	if (!has_legacy_pic()) {
+		/*
+		 * Relies on tsc_early_delay_calibrate() to have given us semi
+		 * usable udelay(), wait for the same 50ms we would have with
+		 * the PIT loop below.
+		 */
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		udelay(10 * USEC_PER_MSEC);
+		return ULONG_MAX;
+	}
+
 	/* Set the Gate high, disable speaker */
 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
@@ -487,6 +502,9 @@ static unsigned long quick_pit_calibrate(void)
 	u64 tsc, delta;
 	unsigned long d1, d2;
 
+	if (!has_legacy_pic())
+		return 0;
+
 	/* Set the Gate high, disable speaker */
 	outb((inb(0x61) & ~0x02) | 0x01, 0x61);
@@ -988,8 +1006,6 @@ static void __init detect_art(void)
 
 /* clocksource code */
 
-static struct clocksource clocksource_tsc;
-
 static void tsc_resume(struct clocksource *cs)
 {
 	tsc_verify_tsc_adjust(true);
@@ -1040,12 +1056,31 @@ static void tsc_cs_tick_stable(struct clocksource *cs)
 /*
  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
  */
+static struct clocksource clocksource_tsc_early = {
+	.name                   = "tsc-early",
+	.rating                 = 299,
+	.read                   = read_tsc,
+	.mask                   = CLOCKSOURCE_MASK(64),
+	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
+				  CLOCK_SOURCE_MUST_VERIFY,
+	.archdata               = { .vclock_mode = VCLOCK_TSC },
+	.resume			= tsc_resume,
+	.mark_unstable		= tsc_cs_mark_unstable,
+	.tick_stable		= tsc_cs_tick_stable,
+};
+
+/*
+ * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
+ * this one will immediately take over. We will only register if TSC has
+ * been found good.
+ */
 static struct clocksource clocksource_tsc = {
 	.name                   = "tsc",
 	.rating                 = 300,
 	.read                   = read_tsc,
 	.mask                   = CLOCKSOURCE_MASK(64),
 	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
+				  CLOCK_SOURCE_VALID_FOR_HRES |
 				  CLOCK_SOURCE_MUST_VERIFY,
 	.archdata               = { .vclock_mode = VCLOCK_TSC },
 	.resume			= tsc_resume,
@@ -1169,8 +1204,8 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 	int cpu;
 
 	/* Don't bother refining TSC on unstable systems */
-	if (check_tsc_unstable())
-		goto out;
+	if (tsc_unstable)
+		return;
 
 	/*
 	 * Since the work is started early in boot, we may be
@@ -1222,9 +1257,13 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 	set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
 
 out:
+	if (tsc_unstable)
+		return;
+
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+	clocksource_unregister(&clocksource_tsc_early);
 }
@@ -1233,13 +1272,11 @@ static int __init init_tsc_clocksource(void)
 	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
+	if (check_tsc_unstable())
+		return 0;
+
 	if (tsc_clocksource_reliable)
 		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-	/* lower the rating if we already know its unstable: */
-	if (check_tsc_unstable()) {
-		clocksource_tsc.rating = 0;
-		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-	}
 
 	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
 		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@@ -1252,6 +1289,7 @@ static int __init init_tsc_clocksource(void)
 	if (boot_cpu_has(X86_FEATURE_ART))
 		art_related_clocksource = &clocksource_tsc;
 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
+	clocksource_unregister(&clocksource_tsc_early);
 	return 0;
 }
@@ -1356,9 +1394,12 @@ void __init tsc_init(void)
 
 	check_system_tsc_reliable();
 
-	if (unsynchronized_tsc())
+	if (unsynchronized_tsc()) {
 		mark_tsc_unstable("TSCs unsynchronized");
+		return;
+	}
 
+	clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
 	detect_art();
 }

View File

@@ -207,6 +207,7 @@ static void tsc_check_state(int state)
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_INTEL:
+	case X86_VENDOR_CENTAUR:
 		/*
 		 * AMD Fam10h TSC will tick in all
 		 * C/P/S0/S1 states when this bit is set.