Merge branch 'for-next/boot' into for-next/core
Boot path cleanups to enable early initialisation of per-cpu operations
needed by KCSAN.

* for-next/boot:
  arm64: scs: Drop unused 'tmp' argument to scs_{load,save} asm macros
  arm64: smp: initialize cpu offset earlier
  arm64: smp: unify task and sp setup
  arm64: smp: remove stack from secondary_data
  arm64: smp: remove pointless secondary_data maintenance
  arm64: assembler: add set_this_cpu_offset
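The common thread in this series is arm64's per-cpu addressing: every
per-cpu access computes "address of the variable's static copy plus this
CPU's offset", with the offset held in TPIDR_EL1 (or TPIDR_EL2 under
VHE). A minimal, userspace-runnable C model of that pointer arithmetic
follows; the array and variable names are hypothetical, for illustration
only:

	#include <stdio.h>

	#define NR_CPUS 4

	static long counter[NR_CPUS];	 /* per-CPU copies of one variable */
	static long cpu_offset[NR_CPUS]; /* models the __per_cpu_offset[] table */

	int main(void)
	{
		/* offset = distance from the CPU0 copy to this CPU's copy */
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			cpu_offset[cpu] =
				(char *)&counter[cpu] - (char *)&counter[0];

		/* per_cpu_ptr(&counter, cpu): base address + per-cpu offset */
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			long *p = (long *)((char *)&counter[0] + cpu_offset[cpu]);
			*p = cpu * 10;
			printf("cpu%d counter=%ld\n", cpu, *p);
		}
		return 0;
	}

In the kernel, the offset must be written to the tpidr register before
any C code touches a per-cpu variable; moving that write earlier in the
boot path is what the diff below does.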
commit e7cf636cba
arch/arm64/include/asm/assembler.h

@@ -232,15 +232,23 @@ lr	.req	x30		// link register
  * @dst: destination register
  */
 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-	.macro	this_cpu_offset, dst
+	.macro	get_this_cpu_offset, dst
 	mrs	\dst, tpidr_el2
 	.endm
 #else
-	.macro	this_cpu_offset, dst
+	.macro	get_this_cpu_offset, dst
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	\dst, tpidr_el1
 alternative_else
 	mrs	\dst, tpidr_el2
 alternative_endif
 	.endm
+
+	.macro	set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+	msr	tpidr_el1, \src
+alternative_else
+	msr	tpidr_el2, \src
+alternative_endif
+	.endm
 #endif
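For context: C code already has this exact tpidr_el1/tpidr_el2 split in
arch/arm64/include/asm/percpu.h; the new set_this_cpu_offset macro gives
assembly the matching write primitive. A rough C analogue (the *_c names
are hypothetical, ALTERNATIVE() patching is elided, and only the non-VHE
EL1 register is shown; this only makes sense in kernel context):

	static inline unsigned long get_this_cpu_offset_c(void)
	{
		unsigned long off;

		/* the per-cpu offset lives in TPIDR_EL1 (TPIDR_EL2 under VHE) */
		asm("mrs %0, tpidr_el1" : "=r" (off));
		return off;
	}

	static inline void set_this_cpu_offset_c(unsigned long off)
	{
		/* installing the offset is a plain system-register write */
		asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
	}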
@@ -253,7 +261,7 @@ alternative_endif
 	.macro adr_this_cpu, dst, sym, tmp
 	adrp	\tmp, \sym
 	add	\dst, \tmp, #:lo12:\sym
-	this_cpu_offset \tmp
+	get_this_cpu_offset \tmp
 	add	\dst, \dst, \tmp
 	.endm

@@ -264,7 +272,7 @@ alternative_endif
  */
 	.macro ldr_this_cpu dst, sym, tmp
 	adr_l	\dst, \sym
-	this_cpu_offset \tmp
+	get_this_cpu_offset \tmp
 	ldr	\dst, [\dst, \tmp]
 	.endm

@@ -745,7 +753,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 	cbz	\tmp, \lbl
 #endif
 	adr_l	\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-	this_cpu_offset \tmp2
+	get_this_cpu_offset \tmp2
 	ldr	w\tmp, [\tmp, \tmp2]
 	cbnz	w\tmp, \lbl	// yield on pending softirq in task context
 .Lnoyield_\@:
arch/arm64/include/asm/scs.h

@@ -9,18 +9,18 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
 	scs_sp	.req	x18

-	.macro scs_load tsk, tmp
+	.macro scs_load tsk
 	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm

-	.macro scs_save tsk, tmp
+	.macro scs_save tsk
 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
-	.macro scs_load tsk, tmp
+	.macro scs_load tsk
 	.endm

-	.macro scs_save tsk, tmp
+	.macro scs_save tsk
 	.endm
 #endif /* CONFIG_SHADOW_CALL_STACK */

arch/arm64/include/asm/smp.h

@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);

 /*
  * Initial data for bringing up a secondary CPU.
- * @stack  - sp for the secondary CPU
  * @status - Result passed back from the secondary CPU to
  *           indicate failure.
  */
 struct secondary_data {
-	void *stack;
 	struct task_struct *task;
 	long status;
 };
arch/arm64/kernel/asm-offsets.c

@@ -27,6 +27,7 @@
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
+  DEFINE(TSK_CPU,		offsetof(struct task_struct, cpu));
   BLANK();
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
@@ -99,7 +100,6 @@ int main(void)
   DEFINE(SOFTIRQ_SHIFT,	SOFTIRQ_SHIFT);
   DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING,	offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
-  DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,	offsetof(struct secondary_data, task));
   BLANK();
   DEFINE(FTR_OVR_VAL_OFFSET,	offsetof(struct arm64_ftr_override, val));
arch/arm64/kernel/entry.S

@@ -275,7 +275,7 @@ alternative_else_nop_endif

 	mte_set_kernel_gcr x22, x23

-	scs_load tsk, x20
+	scs_load tsk
 	.else
 	add	x21, sp, #PT_REGS_SIZE
 	get_current_task tsk
@@ -375,7 +375,7 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-	scs_save tsk, x0
+	scs_save tsk

 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -979,8 +979,8 @@ SYM_FUNC_START(cpu_switch_to)
 	mov	sp, x9
 	msr	sp_el0, x1
 	ptrauth_keys_install_kernel x1, x8, x9, x10
-	scs_save x0, x8
-	scs_load x1, x8
+	scs_save x0
+	scs_load x1
 	ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
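Why the 'tmp' argument could be dropped: scs_sp is an alias for x18, a
register permanently reserved for the shadow call stack pointer when
CONFIG_SHADOW_CALL_STACK=y, so scs_load and scs_save are each a single
ldr/str against a fixed register and never actually used a scratch
register. A hedged C-level paraphrase of the handoff cpu_switch_to
performs above (illustration only; the real x18 handling is not
expressible in portable C):

	/* prev/next correspond to x0/x1 in cpu_switch_to */
	prev->thread_info.scs_sp = scs_sp;	/* scs_save x0 */
	scs_sp = next->thread_info.scs_sp;	/* scs_load x1 */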
arch/arm64/kernel/head.S

@@ -395,15 +395,29 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 SYM_FUNC_END(__create_page_tables)

 /*
+ * Initialize CPU registers with task-specific and cpu-specific context.
+ *
  * Create a final frame record at task_pt_regs(current)->stackframe, so
  * that the unwinder can identify the final frame record of any task by
  * its location in the task stack. We reserve the entire pt_regs space
  * for consistency with user tasks and kthreads.
  */
-	.macro setup_final_frame
+	.macro	init_cpu_task tsk, tmp1, tmp2
+	msr	sp_el0, \tsk
+
+	ldr	\tmp1, [\tsk, #TSK_STACK]
+	add	sp, \tmp1, #THREAD_SIZE
+	sub	sp, sp, #PT_REGS_SIZE
+
 	stp	xzr, xzr, [sp, #S_STACKFRAME]
 	add	x29, sp, #S_STACKFRAME
+
+	scs_load \tsk
+
+	adr_l	\tmp1, __per_cpu_offset
+	ldr	w\tmp2, [\tsk, #TSK_CPU]
+	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
+	set_this_cpu_offset \tmp1
 	.endm

 /*
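The three loads at the end of init_cpu_task encode a table lookup:
__per_cpu_offset is an array of 64-bit offsets, the task's CPU number
(TSK_CPU) is the index, and "lsl #3" scales it by sizeof(long) == 8.
The same computation in C, as a standalone sketch (the function name is
hypothetical):

	#include <stdint.h>

	/* equivalent of: ldr \tmp1, [\tmp1, \tmp2, lsl #3] */
	static inline uint64_t per_cpu_offset_load(const uint64_t *table,
						   uint32_t cpu)
	{
		return table[cpu];	/* byte offset = cpu * 8 */
	}

Folding this into the macro is what lets both the primary and secondary
boot paths install a valid per-cpu offset before running any C code.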
@@ -412,22 +426,16 @@ SYM_FUNC_END(__create_page_tables)
  * x0 = __PHYS_OFFSET
  */
 SYM_FUNC_START_LOCAL(__primary_switched)
-	adrp	x4, init_thread_union
-	add	sp, x4, #THREAD_SIZE
-	adr_l	x5, init_task
-	msr	sp_el0, x5			// Save thread_info
+	adr_l	x4, init_task
+	init_cpu_task x4, x5, x6

 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
 	isb

-	stp	xzr, x30, [sp, #-16]!
+	stp	x29, x30, [sp, #-16]!
 	mov	x29, sp

-#ifdef CONFIG_SHADOW_CALL_STACK
-	adr_l	scs_sp, init_shadow_call_stack	// Set shadow call stack
-#endif
-
 	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

 	ldr_l	x4, kimage_vaddr		// Save the offset between
@@ -459,8 +467,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 0:
 #endif
 	bl	switch_to_vhe			// Prefer VHE if possible
-	add	sp, sp, #16
-	setup_final_frame
+	ldp	x29, x30, [sp], #16
 	bl	start_kernel
 	ASM_BUG()
 SYM_FUNC_END(__primary_switched)
@@ -645,14 +652,10 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 	isb

 	adr_l	x0, secondary_data
-	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
-	cbz	x1, __secondary_too_slow
-	mov	sp, x1
 	ldr	x2, [x0, #CPU_BOOT_TASK]
 	cbz	x2, __secondary_too_slow
-	msr	sp_el0, x2
-	scs_load x2, x3
-	setup_final_frame
+
+	init_cpu_task x2, x1, x3

 #ifdef CONFIG_ARM64_PTR_AUTH
 	ptrauth_keys_init_cpu x2, x3, x4, x5
arch/arm64/kernel/smp.c

@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	set_cpu_logical_map(0, mpidr);

-	/*
-	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
-	 * using percpu variable early, for example, lockdep will
-	 * access percpu variable inside lock_release
-	 */
-	set_my_cpu_offset(0);
 	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
 		(unsigned long)mpidr, read_cpuid_id());
 }
@@ -120,9 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	 * page tables.
 	 */
 	secondary_data.task = idle;
-	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
 	update_cpu_boot_status(CPU_MMU_OFF);
-	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

 	/* Now bring the CPU into our world */
 	ret = boot_secondary(cpu, idle);
@@ -142,8 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)

 	pr_crit("CPU%u: failed to come online\n", cpu);
 	secondary_data.task = NULL;
-	secondary_data.stack = NULL;
-	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
 	status = READ_ONCE(secondary_data.status);
 	if (status == CPU_MMU_OFF)
 		status = READ_ONCE(__early_cpu_boot_status);
@@ -202,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
 	const struct cpu_operations *ops;
-	unsigned int cpu;
-
-	cpu = task_cpu(current);
-	set_my_cpu_offset(per_cpu_offset(cpu));
+	unsigned int cpu = smp_processor_id();

 	/*
 	 * All kernel threads share the same mm context; grab a
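This simplification relies on per-cpu state already being live: on
arm64, smp_processor_id() is itself a per-cpu read. Roughly (a hedged
paraphrase of the arm64 definition, not a quotation):

	/* arch/arm64/include/asm/smp.h, approximately */
	#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))

So it is only usable once the per-cpu offset is installed, which
init_cpu_task now guarantees has happened in head.S before
secondary_start_kernel() is entered, making the open-coded
task_cpu()/set_my_cpu_offset() sequence above redundant.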
@@ -452,6 +445,11 @@ void __init smp_cpus_done(unsigned int max_cpus)

 void __init smp_prepare_boot_cpu(void)
 {
+	/*
+	 * The runtime per-cpu areas have been allocated by
+	 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+	 * freed shortly, so we must move over to the runtime per-cpu area.
+	 */
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 	cpuinfo_store_boot_cpu();

arch/arm64/mm/proc.S

@@ -83,11 +83,7 @@ SYM_FUNC_START(cpu_do_suspend)
 	mrs	x9, mdscr_el1
 	mrs	x10, oslsr_el1
 	mrs	x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	mrs	x12, tpidr_el1
-alternative_else
-	mrs	x12, tpidr_el2
-alternative_endif
+	get_this_cpu_offset x12
 	mrs	x13, sp_el0
 	stp	x2, x3, [x0]
 	stp	x4, x5, [x0, #16]
@@ -145,11 +141,7 @@
 	msr	mdscr_el1, x10

 	msr	sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	msr	tpidr_el1, x13
-alternative_else
-	msr	tpidr_el2, x13
-alternative_endif
+	set_this_cpu_offset x13
 	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1