powerpc/pseries: Do not start secondaries in Open Firmware

Starting secondary CPUs early on from Open Firmware and placing them
in a holding spin loop slows down the boot process significantly under
some hypervisors such as KVM.

This is also unnecessary when RTAS supports querying the CPU state.

So let's not do it.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
commit dbe78b4011 (parent 0c9fa29149)
Author: Benjamin Herrenschmidt
Date:   2013-09-25 14:02:50 +10:00

2 changed files with 37 additions and 10 deletions
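For background, "query-cpu-stopped-state" is an RTAS call that the kernel can reach through the usual rtas_token()/rtas_call() interface. The sketch below shows roughly how such a query looks; it is modelled on the existing smp_query_cpu_stopped() helper in the pseries SMP code, and the QCSS_* values and the helper name here are illustrative rather than part of this patch.

#include <asm/rtas.h>   /* rtas_token(), rtas_call(), RTAS_UNKNOWN_SERVICE */

/* Illustrative status values: firmware reports 0 for "stopped", 2 for "not stopped". */
#define QCSS_STOPPED            0
#define QCSS_NOT_STOPPED        2

/* Ask RTAS whether hardware thread 'pcpu' is still in the stopped state. */
static int query_cpu_stopped(unsigned int pcpu)
{
        int cpu_status, status;
        int qcss_tok = rtas_token("query-cpu-stopped-state");

        /* Older firmware may not implement the call at all. */
        if (qcss_tok == RTAS_UNKNOWN_SERVICE)
                return -1;

        /* One input (the CPU number), two outputs (call status, cpu status). */
        status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
        if (status != 0)
                return status;

        return cpu_status;      /* QCSS_STOPPED, QCSS_NOT_STOPPED, ... */
}

When this call is available the kernel can find out by itself whether a thread is still sitting in firmware, which is what makes the Open Firmware hold loop below redundant.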

diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c

@@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
 
 static cell_t __initdata regbuf[1024];
 
+static bool rtas_has_query_cpu_stopped;
+
 
 /*
  * Error results ... some OF calls will return "-1" on error, some
@@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
         prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
                      &val, sizeof(val));
 
+        /* Check if it supports "query-cpu-stopped-state" */
+        if (prom_getprop(rtas_node, "query-cpu-stopped-state",
+                         &val, sizeof(val)) != PROM_ERROR)
+                rtas_has_query_cpu_stopped = true;
+
 #if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
         /* PowerVN takeover hack */
         prom_rtas_data = base;
@@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
                 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
         unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
 
+        /*
+         * On pseries, if RTAS supports "query-cpu-stopped-state",
+         * we skip this stage, the CPUs will be started by the
+         * kernel using RTAS.
+         */
+        if ((of_platform == PLATFORM_PSERIES ||
+             of_platform == PLATFORM_PSERIES_LPAR) &&
+            rtas_has_query_cpu_stopped) {
+                prom_printf("prom_hold_cpus: skipped\n");
+                return;
+        }
+
         prom_debug("prom_hold_cpus: start...\n");
         prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
         prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
@@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
          * On non-powermacs, put all CPUs in spin-loops.
          *
          * PowerMacs use a different mechanism to spin CPUs
+         *
+         * (This must be done after instanciating RTAS)
          */
         if (of_platform != PLATFORM_POWERMAC &&
             of_platform != PLATFORM_OPAL)
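The parenthetical added to the prom_init() comment above matters because rtas_has_query_cpu_stopped is only set inside prom_instantiate_rtas(), so RTAS instantiation has to happen before prom_hold_cpus() for the skip to trigger. A simplified sketch of the intended ordering follows; it is condensed from the surrounding prom_init() code and the exact guards may differ slightly.

        /* Instantiate RTAS first; this is where rtas_has_query_cpu_stopped
         * gets set when the firmware advertises "query-cpu-stopped-state".
         */
        if (of_platform != PLATFORM_POWERMAC &&
            of_platform != PLATFORM_OPAL)
                prom_instantiate_rtas();

        /* ... other setup ... */

        /* Only now park the secondaries, or, after this patch, skip doing
         * so entirely when the flag above is set.
         */
        if (of_platform != PLATFORM_POWERMAC &&
            of_platform != PLATFORM_OPAL)
                prom_hold_cpus();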

diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c

@@ -233,17 +233,23 @@ static void __init smp_init_pseries(void)
 
         alloc_bootmem_cpumask_var(&of_spin_mask);
 
-        /* Mark threads which are still spinning in hold loops. */
-        if (cpu_has_feature(CPU_FTR_SMT)) {
-                for_each_present_cpu(i) {
-                        if (cpu_thread_in_core(i) == 0)
-                                cpumask_set_cpu(i, of_spin_mask);
-                }
-        } else {
-                cpumask_copy(of_spin_mask, cpu_present_mask);
-        }
+        /*
+         * Mark threads which are still spinning in hold loops
+         *
+         * We know prom_init will not have started them if RTAS supports
+         * query-cpu-stopped-state.
+         */
+        if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
+                if (cpu_has_feature(CPU_FTR_SMT)) {
+                        for_each_present_cpu(i) {
+                                if (cpu_thread_in_core(i) == 0)
+                                        cpumask_set_cpu(i, of_spin_mask);
+                        }
+                } else
+                        cpumask_copy(of_spin_mask, cpu_present_mask);
 
-        cpumask_clear_cpu(boot_cpuid, of_spin_mask);
+                cpumask_clear_cpu(boot_cpuid, of_spin_mask);
+        }
 
         /* Non-lpar has additional take/give timebase */
         if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {