/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/clockchips.h"
#include "linux/interrupt.h"
#include "linux/jiffies.h"
#include "linux/threads.h"
#include "asm/irq.h"
#include "asm/param.h"
#include "kern_util.h"
#include "os.h"

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
        return (unsigned long long)jiffies_64 * (1000000000 / HZ);
}

#ifdef CONFIG_UML_REAL_TIME_CLOCK
static unsigned long long prev_nsecs[NR_CPUS];
static long long delta[NR_CPUS];        /* Deviation per interval */
#endif

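/*
 * Host timer tick handler.  With CONFIG_UML_REAL_TIME_CLOCK the elapsed
 * host time since the previous tick is accumulated in delta[] and turned
 * into whole jiffies, so late or coalesced ticks are made up rather than
 * dropped; without it, every invocation counts as exactly one tick.
 */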
void timer_handler(int sig, struct uml_pt_regs *regs)
{
        unsigned long long ticks = 0;
        unsigned long flags;
#ifdef CONFIG_UML_REAL_TIME_CLOCK
        int c = cpu();
        if (prev_nsecs[c]) {
                /* We've had 1 tick */
                unsigned long long nsecs = os_nsecs();

                delta[c] += nsecs - prev_nsecs[c];
                prev_nsecs[c] = nsecs;

                /* Protect against the host clock being set backwards */
                if (delta[c] < 0)
                        delta[c] = 0;

                ticks += (delta[c] * HZ) / BILLION;
                delta[c] -= (ticks * BILLION) / HZ;
        }
        else prev_nsecs[c] = os_nsecs();
#else
        ticks = 1;
#endif

        local_irq_save(flags);
        while (ticks > 0) {
                do_IRQ(TIMER_IRQ, regs);
                ticks--;
        }
        local_irq_restore(flags);
}

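/*
 * clock_event_device mode hook: periodic mode (re)arms the host interval
 * timer via set_interval(), shutdown/unused disables it with disable_timer(),
 * and one-shot mode is not supported by this device, hence the BUG().
 */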
static void itimer_set_mode(enum clock_event_mode mode,
                            struct clock_event_device *evt)
{
        switch(mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                set_interval();
                break;

        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
                disable_timer();
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                BUG();
                break;

        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

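/*
 * Periodic-only clock event device backed by the host interval timer; its
 * event_handler is fired from um_timer() below.  mult and the delta limits
 * are filled in at boot by setup_itimer().
 */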
static struct clock_event_device itimer_clockevent = {
        .name           = "itimer",
        .rating         = 250,
        .cpumask        = CPU_MASK_ALL,
        .features       = CLOCK_EVT_FEAT_PERIODIC,
        .set_mode       = itimer_set_mode,
        .set_next_event = NULL,
        .shift          = 32,
        .irq            = 0,
};

static irqreturn_t um_timer(int irq, void *dev)
{
        (*itimer_clockevent.event_handler)(&itimer_clockevent);

        return IRQ_HANDLED;
}

static cycle_t itimer_read(void)
{
        return os_nsecs();
}

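/*
 * Clocksource that reads the host clock through os_nsecs().  The values are
 * already in nanoseconds, so mult = 1 and shift = 0 mean no scaling.
 */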
static struct clocksource itimer_clocksource = {
        .name           = "itimer",
        .rating         = 300,
        .read           = itimer_read,
        .mask           = CLOCKSOURCE_MASK(64),
        .mult           = 1,
        .shift          = 0,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

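/*
 * Request the timer interrupt and register the clockevent and clocksource.
 * div_sc(HZ, NSEC_PER_SEC, 32) gives the ticks-per-nanosecond factor scaled
 * by 2^32, which clockevent_delta2ns() then uses to turn the 1..60*HZ tick
 * range back into nanoseconds.
 */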
static void __init setup_itimer(void)
{
        int err;

        err = request_irq(TIMER_IRQ, um_timer, IRQF_DISABLED, "timer", NULL);
        if (err != 0)
                printk(KERN_ERR "register_timer : request_irq failed - "
                       "errno = %d\n", -err);

        itimer_clockevent.mult = div_sc(HZ, NSEC_PER_SEC, 32);
        itimer_clockevent.max_delta_ns =
                clockevent_delta2ns(60 * HZ, &itimer_clockevent);
        itimer_clockevent.min_delta_ns =
                clockevent_delta2ns(1, &itimer_clockevent);
        err = clocksource_register(&itimer_clocksource);
        if (err) {
                printk(KERN_ERR "clocksource_register returned %d\n", err);
                return;
        }
        clockevents_register_device(&itimer_clockevent);
}

extern void (*late_time_init)(void);

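/*
 * Early time init: seed xtime and wall_to_monotonic from the host clock and
 * defer IRQ, clockevent and clocksource registration to setup_itimer() via
 * late_time_init, which the generic boot code calls later in start_kernel().
 */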
void __init time_init(void)
{
        long long nsecs;

        timer_init();

        nsecs = os_nsecs();
        set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION,
                                -nsecs % BILLION);
        set_normalized_timespec(&xtime, nsecs / BILLION, nsecs % BILLION);
        late_time_init = setup_itimer;
}