Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King.

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7358/1: perf: add PMU hotplug notifier
  ARM: 7357/1: perf: fix overflow handling for xscale2 PMUs
  ARM: 7356/1: perf: check that we have an event in the PMU IRQ handlers
  ARM: 7355/1: perf: clear overflow flag when disabling counter on ARMv7 PMU
  ARM: 7354/1: perf: limit sample_period to half max_period in non-sampling mode
  ARM: ecard: ensure fake vma vm_flags is setup
  ARM: 7346/1: errata: fix PL310 erratum #753970 workaround selection
  ARM: 7345/1: errata: update workaround for A9 erratum #743622
  ARM: 7348/1: arm/spear600: fix one-shot timer
  ARM: 7339/1: amba/serial.h: Include types.h for resolving dependency of type bool
commit 4f262acfde
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1280,7 +1280,7 @@ config ARM_ERRATA_743622
 	depends on CPU_V7
 	help
 	  This option enables the workaround for the 743622 Cortex-A9
-	  (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+	  (r2p*) erratum. Under very rare conditions, a faulty
 	  optimisation in the Cortex-A9 Store Buffer may lead to data
 	  corruption. This workaround sets a specific bit in the diagnostic
 	  register of the Cortex-A9 which disables the Store Buffer
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -134,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);

 u64 armpmu_event_update(struct perf_event *event,
 			struct hw_perf_event *hwc,
-			int idx, int overflow);
+			int idx);

 int armpmu_event_set_period(struct perf_event *event,
 			    struct hw_perf_event *hwc,
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)

 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));

+	vma.vm_flags = VM_EXEC;
 	vma.vm_mm = mm;

 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
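The fake vma here lives on the stack, so before this fix its vm_flags field held whatever garbage the stack happened to contain; flush_tlb_range() consults those flags (VM_EXEC decides whether the instruction TLB is flushed as well), so the behaviour was unpredictable. A minimal user-space sketch of the pitfall, with VM_EXEC's value taken from that era's <linux/mm.h>:

#include <stdio.h>

#define VM_EXEC 0x00000004	/* value as in <linux/mm.h> of this era */

struct fake_vma {
	unsigned long vm_flags;
	void *vm_mm;
};

int main(void)
{
	struct fake_vma vma;	/* on-stack: fields start out as garbage */

	vma.vm_flags = VM_EXEC;	/* the fix: set flags before anyone reads them */
	vma.vm_mm = NULL;
	printf("vm_flags = 0x%lx\n", vma.vm_flags);
	return 0;
}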
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;

-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
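The single masked subtraction replaces the explicit overflow bookkeeping: because max_period has the form 2^n - 1, subtraction modulo 2^n yields the elapsed count whether or not the counter wrapped, so callers no longer need to say which case they are in. A standalone sketch (not kernel code) of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Modular delta, assuming max_period == 2^n - 1 as on the ARM PMUs. */
static uint64_t delta(uint64_t prev, uint64_t new, uint64_t max_period)
{
	/* Correct whether or not the counter wrapped between reads. */
	return (new - prev) & max_period;
}

int main(void)
{
	uint64_t max = 0xFFFFFFFFULL;	/* 32-bit counter */

	/* No wrap: 0x20 events elapsed. */
	printf("0x%llx\n", (unsigned long long)delta(0x100, 0x120, max));
	/* Wrap: the counter overflowed once; still 0x20 events elapsed. */
	printf("0x%llx\n", (unsigned long long)delta(0xFFFFFFF0, 0x10, max));
	return 0;
}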
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;

-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }

 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base	    |= (unsigned long)mapping;

 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
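The half-period limit exists because the modular delta above can only express forward progress of up to one full counter period. If a non-sampling event is armed a whole period away from overflow and the overflow IRQ is delayed long enough for the counter to lap its starting value, a complete period of events silently disappears. A toy illustration (not kernel code) with a 32-bit counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max  = 0xFFFFFFFFULL;	/* 32-bit counter: max_period */
	uint64_t prev = 1;		/* armed a full period before overflow */

	/* IRQ arrives so late that the counter has lapped prev by 0x40. */
	uint64_t new = (prev + max + 1 + 0x40) & max;

	/* The modular delta reports 0x40; the full 2^32 lap is lost. */
	printf("delta = 0x%llx\n", (unsigned long long)((new - prev) & max));
	return 0;
}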
@@ -679,6 +679,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 	armpmu->type = ARM_PMU_DEVICE_CPU;
 }

+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
 /*
  * CPU PMU identification and registration.
  */
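The `action & ~CPU_TASKS_FROZEN` mask lets one comparison cover both plain hotplug (CPU_STARTING) and the suspend/resume path (CPU_STARTING_FROZEN, the same event with the frozen bit set). A quick standalone check; the constants mirror that era's <linux/cpu.h> and are an assumption worth verifying against your tree:

#include <stdio.h>

#define CPU_STARTING		0x000A	/* assumed, per <linux/cpu.h> of this era */
#define CPU_TASKS_FROZEN	0x0010	/* assumed, ditto */
#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)

int main(void)
{
	unsigned long actions[] = { CPU_STARTING, CPU_STARTING_FROZEN };

	for (int i = 0; i < 2; i++)
		printf("action 0x%lx matches: %d\n", actions[i],
		       (actions[i] & ~CPU_TASKS_FROZEN) == CPU_STARTING);
	return 0;
}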
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }

-static int counter_is_active(unsigned long pmcr, int idx)
-{
-	unsigned long mask = 0;
-	if (idx == ARMV6_CYCLE_COUNTER)
-		mask = ARMV6_PMCR_CCOUNT_IEN;
-	else if (idx == ARMV6_COUNTER0)
-		mask = ARMV6_PMCR_COUNT0_IEN;
-	else if (idx == ARMV6_COUNTER1)
-		mask = ARMV6_PMCR_COUNT1_IEN;
-
-	if (mask)
-		return pmcr & mask;
-
-	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-	return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!counter_is_active(pmcr, idx))
+		/* Ignore if we don't have an event. */
+		if (!event)
 			continue;

 		/*
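Testing the PMCR interrupt-enable bits was an indirect way of asking whether perf owns a counter: the enable bit can be set while the event pointer has not yet been installed, or has already been torn down. Checking cpuc->events[idx] directly is the authoritative test, and the same guard is added to the v7 and xscale handlers below. A compressed, runnable sketch (not kernel code) of the per-counter loop shape after the fix:

#include <stdio.h>

#define NR_COUNTERS 3

struct perf_event { const char *name; };

/* Toy state: counter 0 owned by perf, counter 1 free, counter 2 owned. */
static struct perf_event ev0 = { "cycles" }, ev2 = { "cache-misses" };
static struct perf_event *events[NR_COUNTERS] = { &ev0, NULL, &ev2 };
static unsigned long overflowed = 0x6;	/* counters 1 and 2 overflowed */

int main(void)
{
	for (int idx = 0; idx < NR_COUNTERS; idx++) {
		struct perf_event *event = events[idx];

		if (!event)		/* the new guard: not ours, skip */
			continue;
		if (!(overflowed & (1UL << idx)))
			continue;	/* shared IRQ: this counter is quiet */

		printf("servicing %s on counter %d\n", event->name, idx);
	}
	return 0;
}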
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;

 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)

 	counter = ARMV7_IDX_TO_COUNTER(idx);
 	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+	isb();
+	/* Clear the overflow flag in case an interrupt is pending. */
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+	isb();
+
 	return idx;
 }
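Disabling the interrupt-enable bit does not retract an overflow that has already latched: the overflow-status register (PMOVSR, the c9, c12, 3 write above) keeps the flag set, and the moment the counter is reused the stale flag can raise a spurious interrupt. The fix therefore acks the flag while disabling, with barriers so the two writes are observed in order; the xscale2 disable path further down gets the same treatment. A toy model of the write-1-to-clear dance:

#include <stdio.h>

/* Toy model of a write-1-to-clear (W1C) overflow-flag register. */
static unsigned int ovsr = 0x1;	/* counter 0 has a latched overflow */

static void ovsr_write(unsigned int val)
{
	ovsr &= ~val;	/* W1C semantics: writing a 1 clears that bit */
}

int main(void)
{
	unsigned int bit = 0x1;

	/* Disable path: after masking the interrupt, ack the pending flag. */
	ovsr_write(bit);
	printf("ovsr after disable = 0x%x\n", ovsr);	/* 0x0: no stale IRQ */
	return 0;
}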
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

+		/* Ignore if we don't have an event. */
+		if (!event)
+			continue;
+
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;

 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;

 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;

-		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+		if (!event)
+			continue;
+
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
 			continue;

 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx, 1);
+		armpmu_event_update(event, hwc, idx);
 		data.period = event->hw.last_period;
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	unsigned long flags, ien, evtsel;
+	unsigned long flags, ien, evtsel, of_flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

 	ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
 		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		of_flags = XSCALE2_CCOUNT_OVERFLOW;
 		break;
 	case XSCALE_COUNTER0:
 		ien &= ~XSCALE2_COUNT0_INT_EN;
 		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		of_flags = XSCALE2_COUNT0_OVERFLOW;
 		break;
 	case XSCALE_COUNTER1:
 		ien &= ~XSCALE2_COUNT1_INT_EN;
 		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		of_flags = XSCALE2_COUNT1_OVERFLOW;
 		break;
 	case XSCALE_COUNTER2:
 		ien &= ~XSCALE2_COUNT2_INT_EN;
 		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		of_flags = XSCALE2_COUNT2_OVERFLOW;
 		break;
 	case XSCALE_COUNTER3:
 		ien &= ~XSCALE2_COUNT3_INT_EN;
 		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		of_flags = XSCALE2_COUNT3_OVERFLOW;
 		break;
 	default:
 		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
+	xscale2pmu_write_overflow_flags(of_flags);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -5,7 +5,7 @@ config UX500_SOC_COMMON
 	default y
 	select ARM_GIC
 	select HAS_MTU
-	select ARM_ERRATA_753970
+	select PL310_ERRATA_753970
 	select ARM_ERRATA_754322
 	select ARM_ERRATA_764369
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -7,7 +7,7 @@ config ARCH_VEXPRESS_CA9X4
 	select ARM_GIC
 	select ARM_ERRATA_720789
 	select ARM_ERRATA_751472
-	select ARM_ERRATA_753970
+	select PL310_ERRATA_753970
 	select HAVE_SMP
 	select MIGHT_HAVE_CACHE_L2X0
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -230,9 +230,7 @@ __v7_setup:
 	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
-	teq	r6, #0x20			@ present in r2p0
-	teqne	r6, #0x21			@ present in r2p1
-	teqne	r6, #0x22			@ present in r2p2
+	teq	r5, #0x00200000			@ only present in r2p*
 	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
 	orreq	r10, r10, #1 << 6		@ set bit #6
 	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
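In __v7_setup, r5 holds the MIDR variant field (bits 23:20, still in register position) and r6 the combined variant/revision nibbles, so the old code matched only r2p0..r2p2 while the new single teq matches every r2p* part, in line with the Kconfig help-text change above. A sketch of the field decoding; the example MIDR value is hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t midr = 0x412FC093;	/* hypothetical Cortex-A9 r2p3 MIDR */
	uint32_t variant  = (midr >> 20) & 0xf;	/* the "rN" in rNpM */
	uint32_t revision = midr & 0xf;		/* the "pM" in rNpM */

	/* Old test: (variant << 4 | revision) == 0x20/0x21/0x22, i.e. r2p0..r2p2.
	 * New test: (midr & 0x00f00000) == 0x00200000, i.e. any r2p*. */
	printf("r%up%u affected: %d\n", variant, revision,
	       (midr & 0x00f00000) == 0x00200000);
	return 0;
}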
--- a/arch/arm/plat-spear/time.c
+++ b/arch/arm/plat-spear/time.c
@@ -145,11 +145,13 @@ static void clockevent_set_mode(enum clock_event_mode mode,
 static int clockevent_next_event(unsigned long cycles,
 				 struct clock_event_device *clk_event_dev)
 {
-	u16 val;
+	u16 val = readw(gpt_base + CR(CLKEVT));
+
+	if (val & CTRL_ENABLE)
+		writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT));

 	writew(cycles, gpt_base + LOAD(CLKEVT));

-	val = readw(gpt_base + CR(CLKEVT));
 	val |= CTRL_ENABLE | CTRL_INT_ENABLE;
 	writew(val, gpt_base + CR(CLKEVT));
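The one-shot bug: writing a new LOAD value while the timer is still running from the previous event does not reliably latch the new count, so the fix stops the timer first, programs LOAD, and only then re-enables it. A user-space toy model of the fixed sequence; the bit values and register stand-ins are assumptions, not the real SPEAr GPT layout:

#include <stdint.h>
#include <stdio.h>

/* Stand-in bit values; the real SPEAr GPT definitions differ. */
#define CTRL_ENABLE	(1 << 5)
#define CTRL_INT_ENABLE	(1 << 8)

static uint16_t cr, load;	/* stand-ins for the memory-mapped CR/LOAD */

static void program_next_event(uint16_t cycles)
{
	uint16_t val = cr;

	if (val & CTRL_ENABLE)		/* stop a still-running timer first */
		cr = val & ~CTRL_ENABLE;

	load = cycles;			/* the new count can now be latched */

	val |= CTRL_ENABLE | CTRL_INT_ENABLE;
	cr = val;			/* restart in one-shot mode */
}

int main(void)
{
	cr = CTRL_ENABLE;		/* previous event still ticking */
	program_next_event(1000);
	printf("LOAD=%u CR=0x%x\n", load, cr);
	return 0;
}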
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -23,6 +23,8 @@
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H

+#include <linux/types.h>
+
 /* -------------------------------------------------------------------------------
  * From AMBA UART (PL010) Block Specification
  * -------------------------------------------------------------------------------