/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Cloned from linux/arch/arm/mach-vexpress/platsmp.c
 *
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/firmware.h>

#include <mach/map.h>

#include "common.h"
#include "regs-pmu.h"

extern void exynos4_secondary_startup(void);

#ifdef CONFIG_HOTPLUG_CPU
static inline void cpu_leave_lowpower(u32 core_id)
{
	unsigned int v;

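	/*
	 * Re-enable the D-cache (SCTLR.C) and set the SMP bit in the
	 * auxiliary control register (bit 6 on Cortex-A9/A15) so the
	 * core rejoins coherency on its way out of low power.
	 */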
	asm volatile(
	"mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}

static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	for (;;) {

		/* Turn the CPU off on next WFI instruction. */
		exynos_cpu_power_down(core_id);

		wfi();

		if (pen_release == core_id) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here, means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * exynos_cpu_power_down : power down the specified cpu
 * @cpu : the cpu to power down
 *
 * Power down the specified cpu. The sequence must be finished by a
 * call to cpu_do_idle()
 */
void exynos_cpu_power_down(int cpu)
{
	u32 core_conf;

	if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
		/*
		 * Bypass power down for CPU0 during suspend. Check for
		 * the SYS_PWR_REG value to decide if we are suspending
		 * the system.
		 */
		int val = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);

		if (!(val & S5P_CORE_LOCAL_PWR_EN))
			return;
	}

	core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
	core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
	pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_up : power up the specified cpu
 * @cpu : the cpu to power up
 *
 * Power up the specified cpu
 */
void exynos_cpu_power_up(int cpu)
{
	u32 core_conf = S5P_CORE_LOCAL_PWR_EN;

	if (soc_is_exynos3250())
		core_conf |= S5P_CORE_AUTOWAKEUP_EN;

	pmu_raw_writel(core_conf,
			EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}

/**
 * exynos_cpu_power_state : returns the power state of the cpu
 * @cpu : the cpu to retrieve the power state from
 */
int exynos_cpu_power_state(int cpu)
{
	return (pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(cpu)) &
			S5P_CORE_LOCAL_PWR_EN);
}

/**
 * exynos_cluster_power_down : power down the specified cluster
 * @cluster : the cluster to power down
 */
void exynos_cluster_power_down(int cluster)
{
	pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_up : power up the specified cluster
 * @cluster : the cluster to power up
 */
void exynos_cluster_power_up(int cluster)
{
	pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
			EXYNOS_COMMON_CONFIGURATION(cluster));
}

/**
 * exynos_cluster_power_state : returns the power state of the cluster
 * @cluster : the cluster to retrieve the power state from
 */
int exynos_cluster_power_state(int cluster)
{
	return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
		S5P_CORE_LOCAL_PWR_EN);
}
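
/*
 * The secondary-CPU boot address register lives in SYSRAM on most SoCs;
 * Exynos4210 rev 1.1 exposes it through the PMU INFORM5 register instead.
 */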
static void __iomem *cpu_boot_reg_base(void)
{
	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
		return pmu_base_addr + S5P_INFORM5;
	return sysram_base_addr;
}

static inline void __iomem *cpu_boot_reg(int cpu)
{
	void __iomem *boot_reg;

	boot_reg = cpu_boot_reg_base();
	if (!boot_reg)
		return ERR_PTR(-ENODEV);
	if (soc_is_exynos4412())
		boot_reg += 4*cpu;
	else if (soc_is_exynos5420() || soc_is_exynos5800())
		boot_reg += 4;
	return boot_reg;
}

/*
 * Set wake up by local power mode and execute software reset for given core.
 *
 * Currently this is needed only when booting secondary CPU on Exynos3250.
 */
void exynos_core_restart(u32 core_id)
{
	u32 val;

	if (!of_machine_is_compatible("samsung,exynos3250"))
		return;

	while (!pmu_raw_readl(S5P_PMU_SPARE2))
		udelay(10);
	udelay(10);

	val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
	val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
	pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));

	pmu_raw_writel(EXYNOS_CORE_PO_RESET(core_id), EXYNOS_SWRESET);
}

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}

static void __iomem *scu_base_addr(void)
{
	return (void __iomem *)(S5P_VA_SCU);
}

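/* Serialises the boot CPU and the incoming secondary CPU during bring-up. */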
static DEFINE_SPINLOCK(boot_lock);

static void exynos_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}

int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
{
	int ret;

	/*
	 * Try to set boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		__raw_writel(boot_addr, boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

int exynos_get_boot_addr(u32 core_id, unsigned long *boot_addr)
{
	int ret;

	/*
	 * Try to get boot address using firmware first
	 * and fall back to boot register if it fails.
	 */
	ret = call_firmware_op(get_cpu_boot_addr, core_id, boot_addr);
	if (ret && ret != -ENOSYS)
		goto fail;
	if (ret == -ENOSYS) {
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg)) {
			ret = PTR_ERR(boot_reg);
			goto fail;
		}
		*boot_addr = __raw_readl(boot_reg);
		ret = 0;
	}
fail:
	return ret;
}

static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	int ret = -ENOSYS;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU core ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	write_pen_release(core_id);

	if (!exynos_cpu_power_state(core_id)) {
		exynos_cpu_power_up(core_id);
		timeout = 10;

		/* wait max 10 ms until cpu1 is on */
		while (exynos_cpu_power_state(core_id)
		       != S5P_CORE_LOCAL_PWR_EN) {
			if (timeout-- == 0)
				break;

			mdelay(1);
		}

		if (timeout == 0) {
			printk(KERN_ERR "cpu1 power enable failed");
			spin_unlock(&boot_lock);
			return -ETIMEDOUT;
		}
	}

	exynos_core_restart(core_id);

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		unsigned long boot_addr;

		smp_rmb();

		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = exynos_set_boot_addr(core_id, boot_addr);
		if (ret)
			goto fail;

		call_firmware_op(cpu_boot, core_id);

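		/*
		 * Kick the secondary: Exynos3250 cores are presumably parked
		 * in WFE, so a SEV suffices; other SoCs need the wakeup IPI.
		 */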
		if (soc_is_exynos3250())
			dsb_sev();
		else
			arch_send_wakeup_ipi_mask(cpumask_of(cpu));

		if (pen_release == -1)
			break;

		udelay(10);
	}

	if (pen_release != -1)
		ret = -ETIMEDOUT;

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
fail:
	spin_unlock(&boot_lock);

	return pen_release != -1 ? ret : 0;
}

/*
 * Initialise the CPU possible map early - this describes the CPUs
 * which may be present or become present in the system.
 */

static void __init exynos_smp_init_cpus(void)
{
	void __iomem *scu_base = scu_base_addr();
	unsigned int i, ncores;

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		ncores = scu_base ? scu_get_core_count(scu_base) : 1;
	else
		/*
		 * CPU Nodes are passed thru DT and set_cpu_possible
		 * is set by "arm_dt_init_cpu_maps".
		 */
		return;

	/* sanity check */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);
}

static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

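	/*
	 * exynos_sysram_init() is assumed here to map SYSRAM so that
	 * cpu_boot_reg_base() can return a usable boot register.
	 */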
	exynos_sysram_init();

	exynos_set_delayed_reset_assertion(true);

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		scu_enable(scu_base_addr());

	/*
	 * Write the address of secondary startup into the
	 * system-wide flags register. The boot monitor waits
	 * until it receives a soft interrupt, and then the
	 * secondary CPU branches to this address.
	 *
	 * Try using firmware operation first and fall back to
	 * boot register if it fails.
	 */
	for (i = 1; i < max_cpus; ++i) {
		unsigned long boot_addr;
		u32 mpidr;
		u32 core_id;
		int ret;

		mpidr = cpu_logical_map(i);
		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		boot_addr = virt_to_phys(exynos4_secondary_startup);

		ret = exynos_set_boot_addr(core_id, boot_addr);
		if (ret)
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	u32 mpidr = cpu_logical_map(cpu);
	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

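	/*
	 * Flush the L1 data cache and take this core out of SMP coherency
	 * before it is powered down.
	 */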
	v7_exit_coherency_flush(louis);

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower(core_id);

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
#endif /* CONFIG_HOTPLUG_CPU */

struct smp_operations exynos_smp_ops __initdata = {
	.smp_init_cpus		= exynos_smp_init_cpus,
	.smp_prepare_cpus	= exynos_smp_prepare_cpus,
	.smp_secondary_init	= exynos_secondary_init,
	.smp_boot_secondary	= exynos_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= exynos_cpu_die,
#endif
};