/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012, NVIDIA Corporation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>

#include "iomap.h"
#include "reset.h"
#include "sleep.h"

#define PMC_SCRATCH41	0x140

#ifdef CONFIG_PM_SLEEP
/*
 *	tegra_resume
 *
 *	  CPU boot vector when restarting a CPU following
 *	  an LP2 transition. Also branched to by LP0 and LP1 resume after
 *	  re-enabling SDRAM.
 *
 *	r6: SoC ID
 *	r8: CPU part number
 */
ENTRY(tegra_resume)
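	/* r8 = CPU part number; the L1 invalidate below runs on Cortex-A9 only */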
	check_cpu_part_num 0xc09, r8, r9
	bleq	v7_invalidate_l1

	cpu_id	r0
	cmp	r0, #0				@ CPU0?
 THUMB(	it	ne )
	bne	cpu_resume			@ no

	tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
	/* Are we on Tegra20? */
	cmp	r6, #TEGRA20
	beq	1f				@ Yes
	/* Clear the flow controller flags for this CPU. */
	cpu_to_csr_reg	r3, r0
	mov32	r2, TEGRA_FLOW_CTRL_BASE
	ldr	r1, [r2, r3]
	/* Clear event & intr flag */
	orr	r1, r1, \
		#FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	movw	r0, #0x3FFD	@ enable, cluster_switch, immed, bitmaps
				@ & ext flags for CPU power mgmt
	bic	r1, r1, r0
	str	r1, [r2, r3]
1:
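
	/* SCU enable, firmware wakeup and L2C-310 resume below are Cortex-A9 only */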
	mov32	r9, 0xc09
	cmp	r8, r9
	bne	end_ca9_scu_l2_resume
#ifdef CONFIG_HAVE_ARM_SCU
	/* enable SCU */
	mov32	r0, TEGRA_ARM_PERIF_BASE
	ldr	r1, [r0]
	orr	r1, r1, #1
	str	r1, [r0]
#endif
	bl	tegra_resume_trusted_foundations

#ifdef CONFIG_CACHE_L2X0
	/* L2 cache resume & re-enable */
	bl	l2c310_early_resume
#endif
end_ca9_scu_l2_resume:
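	/* Cortex-A15 (part 0xc0f) sets up its integrated L2 here instead */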
	mov32	r9, 0xc0f
	cmp	r8, r9
	bleq	tegra_init_l2_for_a15

	b	cpu_resume
ENDPROC(tegra_resume)

/*
 *	tegra_resume_trusted_foundations
 *
 *	  Trusted Foundations firmware initialization.
 *
 *	Doesn't return if firmware is present.
 *	Corrupted registers: r1, r2
 */
ENTRY(tegra_resume_trusted_foundations)
	/* Check whether Trusted Foundations firmware is present. */
	mov32	r2, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
	ldr	r1, =__tegra_cpu_reset_handler_data_offset + \
			RESET_DATA(TF_PRESENT)
	ldr	r1, [r2, r1]
	cmp	r1, #0
	reteq	lr

	.arch_extension sec
	/* First call after suspend wakes firmware. No arguments required. */
	smc	#0

	b	cpu_resume
ENDPROC(tegra_resume_trusted_foundations)
#endif

	.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler_start)

/*
 * __tegra_cpu_reset_handler:
 *
 * Common handler for all CPU reset events.
 *
 * Register usage within the reset handler:
 *
 *      Others: scratch
 *      R6  = SoC ID
 *      R7  = CPU present (to the OS) mask
 *      R8  = CPU in LP1 state mask
 *      R9  = CPU in LP2 state mask
 *      R10 = CPU number
 *      R11 = CPU mask
 *      R12 = pointer to reset handler data
 *
 * NOTE: This code is copied to IRAM. All code and data accesses
 *       must be position-independent.
 */

	.arm
	.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler)

cpsid aif, 0x13 @ SVC mode, interrupts disabled
tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
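
	/*
	 * If Trusted Foundations firmware is present, the kernel runs in
	 * non-secure mode and cannot write the secure-only registers that the
	 * CPU errata workarounds below require, so skip them.
	 */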
	adr	r12, __tegra_cpu_reset_handler_data
	ldr	r5, [r12, #RESET_DATA(TF_PRESENT)]
	cmp	r5, #0
	bne	after_errata

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
t20_check:
	cmp	r6, #TEGRA20
	bne	after_t20_check
t20_errata:
	# Tegra20 is a Cortex-A9 r1p1
	mrc	p15, 0, r0, c1, c0, 0	@ read system control register
	orr	r0, r0, #1 << 14	@ erratum 716044
	mcr	p15, 0, r0, c1, c0, 0	@ write system control register
	mrc	p15, 0, r0, c15, c0, 1	@ read diagnostic register
	orr	r0, r0, #1 << 4		@ erratum 742230
	orr	r0, r0, #1 << 11	@ erratum 751472
	mcr	p15, 0, r0, c15, c0, 1	@ write diagnostic register
	b	after_errata
after_t20_check:
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
t30_check:
	cmp	r6, #TEGRA30
	bne	after_t30_check
t30_errata:
	# Tegra30 is a Cortex-A9 r2p9
	mrc	p15, 0, r0, c15, c0, 1	@ read diagnostic register
	orr	r0, r0, #1 << 6		@ erratum 743622
	orr	r0, r0, #1 << 11	@ erratum 751472
	mcr	p15, 0, r0, c15, c0, 1	@ write diagnostic register
	b	after_errata
after_t30_check:
#endif
after_errata:
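	/* Only MPIDR affinity level 0 is used, i.e. at most four CPUs */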
	mrc	p15, 0, r10, c0, c0, 5		@ MPIDR
	and	r10, r10, #0x3			@ R10 = CPU number
	mov	r11, #1
	mov	r11, r11, lsl r10		@ R11 = CPU mask

#ifdef CONFIG_SMP
	/* Does the OS know about this CPU? */
	ldr	r7, [r12, #RESET_DATA(MASK_PRESENT)]
	tst	r7, r11				@ if !present
	bleq	__die				@ CPU not present (to OS)
#endif

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	/* Are we on Tegra20? */
	cmp	r6, #TEGRA20
	bne	1f
	/* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
	mov	r0, #CPU_NOT_RESETTABLE
	cmp	r10, #0
	strbne	r0, [r12, #RESET_DATA(RESETTABLE_STATUS)]
1:
#endif

	/* Waking up from LP1? */
	ldr	r8, [r12, #RESET_DATA(MASK_LP1)]
	tst	r8, r11				@ if in_lp1
	beq	__is_not_lp1
	cmp	r10, #0
	bne	__die				@ only CPU0 can be here
	ldr	lr, [r12, #RESET_DATA(STARTUP_LP1)]
	cmp	lr, #0
	bleq	__die				@ no LP1 startup handler
 THUMB(	add	lr, lr, #1 )			@ switch to Thumb mode
	bx	lr
__is_not_lp1:

	/* Waking up from LP2? */
	ldr	r9, [r12, #RESET_DATA(MASK_LP2)]
	tst	r9, r11				@ if in_lp2
	beq	__is_not_lp2
	ldr	lr, [r12, #RESET_DATA(STARTUP_LP2)]
	cmp	lr, #0
	bleq	__die				@ no LP2 startup handler
	bx	lr

__is_not_lp2:

#ifdef CONFIG_SMP
	/*
	 * Can only be secondary boot (initial or hotplug)
	 * CPU0 can't be here for Tegra20/30
	 */
	cmp	r6, #TEGRA114
	beq	__no_cpu0_chk
	cmp	r10, #0
	bleq	__die				@ CPU0 cannot be here
__no_cpu0_chk:
	ldr	lr, [r12, #RESET_DATA(STARTUP_SECONDARY)]
	cmp	lr, #0
	bleq	__die				@ no secondary startup handler
	bx	lr
#endif

/*
 * We don't know why the CPU reset. Just kill it.
 * The LR register will contain the address we died at + 4.
 */

__die:
	sub	lr, lr, #4
	mov32	r7, TEGRA_PMC_BASE
	str	lr, [r7, #PMC_SCRATCH41]

	mov32	r7, TEGRA_CLK_RESET_BASE

	/* Are we on Tegra20? */
	cmp	r6, #TEGRA20
	bne	1f

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
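	/* Assert all of this CPU's reset signals (0x1111 << CPU number) */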
	mov32	r0, 0x1111
	mov	r1, r0, lsl r10
	str	r1, [r7, #0x340]		@ CLK_RST_CPU_CMPLX_SET
#endif
1:
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
mov32 r6, TEGRA_FLOW_CTRL_BASE
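
	/* r1 = this CPU's HALT events offset, r2 = its CSR offset */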
	cmp	r10, #0
	moveq	r1, #FLOW_CTRL_HALT_CPU0_EVENTS
	moveq	r2, #FLOW_CTRL_CPU0_CSR
	movne	r1, r10, lsl #3
	addne	r2, r1, #(FLOW_CTRL_CPU1_CSR-8)
	addne	r1, r1, #(FLOW_CTRL_HALT_CPU1_EVENTS-8)

	/* Clear CPU "event" and "interrupt" flags and power gate
	   it when halting but not before it is in the "WFI" state. */
	ldr	r0, [r6, +r2]
	orr	r0, r0, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
	orr	r0, r0, #FLOW_CTRL_CSR_ENABLE
	str	r0, [r6, +r2]

	/* Unconditionally halt this CPU */
	mov	r0, #FLOW_CTRL_WAITEVENT
	str	r0, [r6, +r1]
	ldr	r0, [r6, +r1]			@ memory barrier

	dsb
	isb
	wfi					@ CPU should be power gated here

	/* If the CPU didn't power gate above, just kill its clock. */

mov r0, r11, lsl #8
	str	r0, [r7, #0x348]		@ CLK_CPU_CMPLX_SET
#endif

	/* If the CPU still isn't dead, just spin here. */
	b	.
ENDPROC(__tegra_cpu_reset_handler)

	.align L1_CACHE_SHIFT
	.type	__tegra_cpu_reset_handler_data, %object
	.globl	__tegra_cpu_reset_handler_data
	.globl	__tegra_cpu_reset_handler_data_offset
	.equ	__tegra_cpu_reset_handler_data_offset, \
					. - __tegra_cpu_reset_handler_start
__tegra_cpu_reset_handler_data:
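	/* TEGRA_RESET_DATA_SIZE words, zero-filled here and written at runtime */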
	.rept	TEGRA_RESET_DATA_SIZE
	.long	0
	.endr
	.align L1_CACHE_SHIFT

ENTRY(__tegra_cpu_reset_handler_end)