Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - Cache invalidation fix for the early CPU boot status update (the
   wrong cacheline was being invalidated)

 - of_node_put() missing in the spin_table code

 - EL1/EL2 early init inconsistency when Virtualisation Host Extensions
   are present

 - RCU warning fix in the arm_pmu.c driver

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix EL1/EL2 early init inconsistencies with VHE
  drivers/perf: arm-pmu: fix RCU usage on pmu resume from low-power
  arm64: spin-table: add missing of_node_put()
  arm64: fix invalidation of wrong __early_cpu_boot_status cacheline
commit ddce192106
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -588,6 +588,15 @@ set_hcr:
         msr     vpidr_el2, x0
         msr     vmpidr_el2, x1
 
+        /*
+         * When VHE is not in use, early init of EL2 and EL1 needs to be
+         * done here.
+         * When VHE _is_ in use, EL1 will not be used in the host and
+         * requires no configuration, and all non-hyp-specific EL2 setup
+         * will be done via the _EL1 system register aliases in __cpu_setup.
+         */
+        cbnz    x2, 1f
+
         /* sctlr_el1 */
         mov     x0, #0x0800                     // Set/clear RES{1,0} bits
 CPU_BE( movk    x0, #0x33d0, lsl #16    )       // Set EE and E0E on BE systems
@@ -597,6 +606,7 @@ CPU_LE( movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
         /* Coprocessor traps. */
         mov     x0, #0x33ff
         msr     cptr_el2, x0                    // Disable copro. traps to EL2
+1:
 
 #ifdef CONFIG_COMPAT
         msr     hstr_el2, xzr                   // Disable CP15 traps to EL2
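The comment added in the hunk above captures the VHE rule behind this fix: with the Virtualisation Host Extensions the kernel itself stays at EL2, so host EL1 needs no early configuration and the non-hyp EL2 setup happens later through the _EL1 register aliases in __cpu_setup; without VHE, EL2 and EL1 must be initialised here. As a rough, hedged C-level illustration (not part of this patch), later arm64 code can make the same VHE-or-not decision with is_kernel_in_hyp_mode() from asm/virt.h; the helper below is hypothetical:

#include <linux/types.h>
#include <asm/virt.h>           /* is_kernel_in_hyp_mode() */

/*
 * Hypothetical helper, for illustration only: with VHE the kernel runs
 * at EL2, so there is no host EL1 state to initialise early; without
 * VHE, EL2 must be configured before the kernel drops to EL1.
 */
static bool needs_early_el1_init(void)
{
        return !is_kernel_in_hyp_mode();
}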
@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
 
         .macro  update_early_cpu_boot_status status, tmp1, tmp2
         mov     \tmp2, #\status
-        str_l   \tmp2, __early_cpu_boot_status, \tmp1
+        adr_l   \tmp1, __early_cpu_boot_status
+        str     \tmp2, [\tmp1]
         dmb     sy
         dc      ivac, \tmp1                     // Invalidate potentially stale cache line
         .endm
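This second head.S hunk is the __early_cpu_boot_status fix: str_l expands to an adrp/str pair, so the temporary register was left holding only the page-aligned address, and the following dc ivac invalidated the wrong cache line; computing the variable's full address with adr_l and reusing it for dc ivac corrects that. As a hedged C-level analogue of the underlying requirement (illustration only; publish_boot_status() is a hypothetical helper, and __flush_dcache_area() is assumed from asm/cacheflush.h), the cache maintenance must target the line that actually holds the flag being published:

#include <linux/compiler.h>     /* WRITE_ONCE() */
#include <asm/cacheflush.h>     /* __flush_dcache_area() */

/*
 * Illustration only (hypothetical helper, not kernel code): maintain the
 * cache line at the variable's own address, so a reader with a cacheable
 * mapping cannot keep observing a stale copy.
 */
static void publish_boot_status(long *status, long val)
{
        WRITE_ONCE(*status, val);
        __flush_dcache_area(status, sizeof(*status));
}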
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
 static int smp_spin_table_cpu_init(unsigned int cpu)
 {
         struct device_node *dn;
+        int ret;
 
         dn = of_get_cpu_node(cpu, NULL);
         if (!dn)
@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
         /*
          * Determine the address from which the CPU is polling.
          */
-        if (of_property_read_u64(dn, "cpu-release-addr",
-                                 &cpu_release_addr[cpu])) {
+        ret = of_property_read_u64(dn, "cpu-release-addr",
+                                   &cpu_release_addr[cpu]);
+        if (ret)
                 pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
                        cpu);
 
-                return -1;
-        }
+        of_node_put(dn);
 
-        return 0;
+        return ret;
 }
 
 static int smp_spin_table_cpu_prepare(unsigned int cpu)
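The spin-table change above is a reference-count fix: of_get_cpu_node() returns a device_node with its refcount elevated, so every path out of the function must drop it with of_node_put(). A minimal hedged sketch of that pattern follows (read_cpu_release_addr() is a hypothetical helper, not the file's code, though it mirrors the fixed flow):

#include <linux/types.h>
#include <linux/of.h>
#include <linux/printk.h>
#include <linux/errno.h>

/*
 * Hypothetical helper illustrating the pattern enforced by the fix:
 * of_get_cpu_node() takes a reference on the node, so it must be
 * balanced with of_node_put() on both the success and error paths.
 */
static int read_cpu_release_addr(unsigned int cpu, u64 *addr)
{
        struct device_node *dn;
        int ret;

        dn = of_get_cpu_node(cpu, NULL);
        if (!dn)
                return -ENODEV;

        ret = of_property_read_u64(dn, "cpu-release-addr", addr);
        if (ret)
                pr_err("CPU %u: missing or invalid cpu-release-addr property\n",
                       cpu);

        of_node_put(dn);        /* balance of_get_cpu_node() */
        return ret;
}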
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
                 break;
         case CPU_PM_EXIT:
         case CPU_PM_ENTER_FAILED:
-                /* Restore and enable the counter */
-                armpmu_start(event, PERF_EF_RELOAD);
+                /*
+                 * Restore and enable the counter.
+                 * armpmu_start() indirectly calls
+                 *
+                 * perf_event_update_userpage()
+                 *
+                 * that requires RCU read locking to be functional,
+                 * wrap the call within RCU_NONIDLE to make the
+                 * RCU subsystem aware this cpu is not idle from
+                 * an RCU perspective for the armpmu_start() call
+                 * duration.
+                 */
+                RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                 break;
         default:
                 break;
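The arm_pmu.c comment above explains the RCU warning being fixed: CPU_PM notifier callbacks run after RCU has marked the CPU idle, so a callee that relies on RCU read-side critical sections (here perf_event_update_userpage(), reached via armpmu_start()) must be wrapped in RCU_NONIDLE(). A minimal hedged sketch of that usage pattern (the notifier and its callee below are hypothetical, not the driver's code):

#include <linux/cpu_pm.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>     /* RCU_NONIDLE() */

/* Hypothetical stand-in for work that needs RCU read-side protection. */
static void resume_counters_using_rcu(void)
{
        /* e.g. something that ends up inside an rcu_read_lock() section */
}

/*
 * Hypothetical CPU PM notifier illustrating the pattern: wrap the
 * RCU-using call in RCU_NONIDLE() so RCU treats this CPU as non-idle
 * for its duration.
 */
static int example_cpu_pm_notify(struct notifier_block *nb,
                                 unsigned long cmd, void *v)
{
        switch (cmd) {
        case CPU_PM_EXIT:
        case CPU_PM_ENTER_FAILED:
                RCU_NONIDLE(resume_counters_using_rcu());
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}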