Merge branch 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Peter Anvin:
 "A significantly larger than I'd like set of patches for just below
  the wire. All of these, however, fix real problems.

  The one thing that is genuinely scary in here is the change of SMP
  initialization, but that *does* fix a confirmed hang when booting
  virtual machines.

  There is also a patch to actually do the right thing about not
  offlining a CPU when there are not enough interrupt vectors available
  in the system; the accounting was done incorrectly. The worst case
  for that patch is that we fail to offline CPUs when we should (the
  new code is strictly more conservative than the old), so it is not
  particularly risky.

  Most of the rest is minor stuff; the EFI patches are all about
  exporting correct information to boot loaders and kexec"

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot: EFI_MIXED should not prohibit loading above 4G
  x86/smpboot: Initialize secondary CPU only if master CPU will wait for it
  x86/smpboot: Log error on secondary CPU wakeup failure at ERR level
  x86: Fix list/memory corruption on CPU hotplug
  x86: irq: Get correct available vectors for cpu disable
  x86/efi: Do not export efi runtime map in case old map
  x86/efi: earlyprintk=efi,keep fix
commit 813895f8dc
arch/x86/boot/header.S
@@ -375,8 +375,7 @@ xloadflags:
 # define XLF0 0
 #endif
 
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64) && \
-	!defined(CONFIG_EFI_MIXED)
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64)
 /* kernel/boot_param/ramdisk could be loaded above 4g */
 # define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G
 #else
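
With EFI_MIXED no longer clearing it, a relocatable 64-bit kernel again advertises XLF_CAN_BE_LOADED_ABOVE_4G to boot loaders. Per the x86 boot protocol documentation, that is bit 1 of the 16-bit xloadflags field at offset 0x236 of the bzImage setup header. Below is a minimal userspace sketch of how a loader might test the bit; it assumes a little-endian host and skips the signature and protocol-version checks a real loader would do first, so it is illustrative only, not loader code.

/* Illustrative check of xloadflags in a bzImage; not actual loader code. */
#include <stdint.h>
#include <stdio.h>

#define XLOADFLAGS_OFFSET	0x236		/* per the x86 boot protocol */
#define XLF_CAN_BE_LOADED_ABOVE_4G	(1 << 1)

int main(int argc, char **argv)
{
	FILE *f;
	uint16_t xloadflags;

	if (argc != 2) {
		fprintf(stderr, "usage: %s bzImage\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror(argv[1]);
		return 1;
	}
	if (fseek(f, XLOADFLAGS_OFFSET, SEEK_SET) != 0 ||
	    fread(&xloadflags, sizeof(xloadflags), 1, f) != 1) {
		fprintf(stderr, "failed to read xloadflags\n");
		fclose(f);
		return 1;
	}
	fclose(f);

	if (xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G)
		printf("kernel/boot_params/ramdisk may be placed above 4G\n");
	else
		printf("loader must keep everything below 4G\n");
	return 0;
}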

arch/x86/kernel/cpu/common.c
@@ -1221,6 +1221,17 @@ static void dbg_restore_debug_regs(void)
 #define dbg_restore_debug_regs()
 #endif /* ! CONFIG_KGDB */
 
+static void wait_for_master_cpu(int cpu)
+{
+	/*
+	 * wait for ACK from master CPU before continuing
+	 * with AP initialization
+	 */
+	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
+	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
+		cpu_relax();
+}
+
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
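
The new function is the AP's half of a two-flag handshake: the AP announces itself in cpu_initialized_mask, then refuses to continue cpu_init() until the master sets cpu_callout_mask. The following standalone pthreads model shows the protocol end to end; the flag names mirror the kernel masks, but the threads and yields are invented stand-ins, not kernel code.

/*
 * Standalone pthreads model of the cpu_initialized / cpu_callout /
 * cpu_callin handshake. Build: cc -pthread handshake.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpu_initialized;	/* AP: "I am alive, awaiting ACK" */
static atomic_int cpu_callout;		/* BSP: "proceed with initialization" */
static atomic_int cpu_callin;		/* AP: "initialization complete" */

/* AP side: analogue of wait_for_master_cpu() plus the rest of cpu_init() */
static void *ap_thread(void *arg)
{
	(void)arg;
	atomic_store(&cpu_initialized, 1);
	while (!atomic_load(&cpu_callout))
		sched_yield();			/* cpu_relax() stand-in */
	/* ...per-CPU initialization would run here... */
	atomic_store(&cpu_callin, 1);
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, ap_thread, NULL);

	/* BSP side: ACK the AP only once it has reached the handshake */
	while (!atomic_load(&cpu_initialized))
		sched_yield();			/* the real BSP bounds this wait */
	atomic_store(&cpu_callout, 1);

	while (!atomic_load(&cpu_callin))
		sched_yield();
	puts("AP came online");

	pthread_join(ap, NULL);
	return 0;
}

The point of the ordering is that the AP touches no further shared state until the master acknowledges it, so a master that gives up can simply never set the callout flag and the AP stays parked instead of racing ahead, which is what fixed the confirmed virtual-machine boot hang.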

@@ -1236,16 +1247,17 @@ void cpu_init(void)
 	struct task_struct *me;
 	struct tss_struct *t;
 	unsigned long v;
-	int cpu;
+	int cpu = stack_smp_processor_id();
 	int i;
 
+	wait_for_master_cpu(cpu);
+
 	/*
 	 * Load microcode on this cpu if a valid microcode is available.
 	 * This is early microcode loading procedure.
 	 */
 	load_ucode_ap();
 
-	cpu = stack_smp_processor_id();
 	t = &per_cpu(init_tss, cpu);
 	oist = &per_cpu(orig_ist, cpu);
 
@@ -1257,9 +1269,6 @@ void cpu_init(void)
 
 	me = current;
 
-	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
-		panic("CPU#%d already initialized!\n", cpu);
-
 	pr_debug("Initializing CPU#%d\n", cpu);
 
 	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

@@ -1336,13 +1345,9 @@ void cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &curr->thread;
 
-	show_ucode_info_early();
+	wait_for_master_cpu(cpu);
 
-	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
-		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;)
-			local_irq_enable();
-	}
+	show_ucode_info_early();
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 

arch/x86/kernel/irq.c
@@ -17,6 +17,7 @@
 #include <asm/idle.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
+#include <asm/desc.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>

@@ -334,9 +335,16 @@ int check_irq_vectors_for_cpu_disable(void)
 	for_each_online_cpu(cpu) {
 		if (cpu == this_cpu)
 			continue;
-		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
-		     vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] < 0)
+		/*
+		 * We scan from FIRST_EXTERNAL_VECTOR to first system
+		 * vector. If the vector is marked in the used vectors
+		 * bitmap or an irq is assigned to it, we don't count
+		 * it as available.
+		 */
+		for (vector = FIRST_EXTERNAL_VECTOR;
+		     vector < first_system_vector; vector++) {
+			if (!test_bit(vector, used_vectors) &&
+			    per_cpu(vector_irq, cpu)[vector] < 0)
 				count++;
 		}
 	}
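
The fix tightens the accounting in two ways: the scan now stops at first_system_vector instead of NR_VECTORS (vectors at and above it are reserved for system use and can never absorb a relocated device interrupt; the asm/desc.h include added above appears to be what supplies that symbol), and a vector only counts as free if it is both outside the used_vectors bitmap and unassigned in vector_irq. A small standalone model of the corrected count follows; FIRST_EXTERNAL_VECTOR (0x20) mirrors the kernel's value, but the example reservations and the first_system_vector value are invented for illustration.

/* Standalone model of the corrected free-vector count; values invented. */
#include <stdbool.h>
#include <stdio.h>

#define NR_VECTORS		256
#define FIRST_EXTERNAL_VECTOR	0x20

static int  vector_irq[NR_VECTORS];	/* < 0 means "no irq assigned" */
static bool used_vectors[NR_VECTORS];	/* reserved/system vectors */

static int count_available(int first_system_vector)
{
	int vector, count = 0;

	/*
	 * Mirror the patched rule: only vectors below the first system
	 * vector, neither reserved nor assigned, are truly available.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR;
	     vector < first_system_vector; vector++) {
		if (!used_vectors[vector] && vector_irq[vector] < 0)
			count++;
	}
	return count;
}

int main(void)
{
	int v, first_system_vector = 0xef;	/* example value */

	for (v = 0; v < NR_VECTORS; v++)
		vector_irq[v] = -1;

	vector_irq[0x31] = 5;		/* pretend irq 5 lives at vector 0x31 */
	used_vectors[0x80] = true;	/* pretend 0x80 is reserved */

	printf("available vectors per cpu: %d\n",
	       count_available(first_system_vector));
	return 0;
}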

arch/x86/kernel/smpboot.c
@@ -111,7 +111,6 @@ atomic_t init_deasserted;
 static void smp_callin(void)
 {
 	int cpuid, phys_id;
-	unsigned long timeout;
 
 	/*
 	 * If waken up by an INIT in an 82489DX configuration

@@ -130,37 +129,6 @@ static void smp_callin(void)
 	 * (This works even if the APIC is not enabled.)
 	 */
 	phys_id = read_apic_id();
-	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
-		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
-					phys_id, cpuid);
-	}
-	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);
-
-	/*
-	 * STARTUP IPIs are fragile beasts as they might sometimes
-	 * trigger some glue motherboard logic. Complete APIC bus
-	 * silence for 1 second, this overestimates the time the
-	 * boot CPU is spending to send the up to 2 STARTUP IPIs
-	 * by a factor of two. This should be enough.
-	 */
-
-	/*
-	 * Waiting 2s total for startup (udelay is not yet working)
-	 */
-	timeout = jiffies + 2*HZ;
-	while (time_before(jiffies, timeout)) {
-		/*
-		 * Has the boot CPU finished it's STARTUP sequence?
-		 */
-		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
-			break;
-		cpu_relax();
-	}
-
-	if (!time_before(jiffies, timeout)) {
-		panic("%s: CPU%d started up but did not get a callout!\n",
-		      __func__, cpuid);
-	}
-
 	/*
 	 * the boot CPU has finished the init stage and is spinning

@@ -750,8 +718,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	unsigned long start_ip = real_mode_header->trampoline_start;
 
 	unsigned long boot_error = 0;
-	int timeout;
 	int cpu0_nmi_registered = 0;
+	unsigned long timeout;
 
 	/* Just in case we booted with a single CPU. */
 	alternatives_enable_smp();

@@ -798,6 +766,15 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 		}
 	}
 
+	/*
+	 * AP might wait on cpu_callout_mask in cpu_init() with
+	 * cpu_initialized_mask set if previous attempt to online
+	 * it timed-out. Clear cpu_initialized_mask so that after
+	 * INIT/SIPI it could start with a clean state.
+	 */
+	cpumask_clear_cpu(cpu, cpu_initialized_mask);
+	smp_mb();
+
 	/*
 	 * Wake up a CPU in difference cases:
 	 * - Use the method in the APIC driver if it's defined
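
Without this reset, an AP left over from a timed-out attempt could still have its bit set in cpu_initialized_mask, so the WARN_ON in wait_for_master_cpu() would fire and the new handshake would start from dirty state. A small sketch of the same retry hygiene, reusing the flags from the earlier handshake model; the spin limit and helper names are invented.

/* Retry-hygiene sketch for the hunk above; all helpers invented. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpu_initialized;
static atomic_int cpu_callout;

static void *ap_attempt(void *arg)
{
	(void)arg;
	atomic_store(&cpu_initialized, 1);
	while (!atomic_load(&cpu_callout))
		sched_yield();
	return NULL;
}

static int try_boot_ap(long max_spins)
{
	pthread_t ap;
	long spins = 0;

	/*
	 * Analogue of the patch: wipe whatever a previous, timed-out
	 * attempt left behind, with a full barrier (smp_mb() stand-in)
	 * so the fresh attempt cannot observe stale handshake state.
	 */
	atomic_store(&cpu_initialized, 0);
	atomic_thread_fence(memory_order_seq_cst);

	pthread_create(&ap, NULL, ap_attempt, NULL);
	while (!atomic_load(&cpu_initialized) && spins++ < max_spins)
		sched_yield();
	atomic_store(&cpu_callout, 1);	/* let the thread finish either way */
	pthread_join(ap, NULL);
	return atomic_load(&cpu_initialized) ? 0 : -1;
}

int main(void)
{
	printf("attempt: %s\n", try_boot_ap(1000000) ? "timed out" : "ok");
	return 0;
}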

@@ -810,58 +787,41 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
 						     &cpu0_nmi_registered);
 
 	if (!boot_error) {
 		/*
-		 * allow APs to start initializing.
+		 * Wait 10s total for a response from AP
 		 */
-		pr_debug("Before Callout %d\n", cpu);
-		cpumask_set_cpu(cpu, cpu_callout_mask);
-		pr_debug("After Callout %d\n", cpu);
-
-		/*
-		 * Wait 5s total for a response
-		 */
-		for (timeout = 0; timeout < 50000; timeout++) {
-			if (cpumask_test_cpu(cpu, cpu_callin_mask))
-				break;	/* It has booted */
-			udelay(100);
+		boot_error = -1;
+		timeout = jiffies + 10*HZ;
+		while (time_before(jiffies, timeout)) {
+			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
+				/*
+				 * Tell AP to proceed with initialization
+				 */
+				cpumask_set_cpu(cpu, cpu_callout_mask);
+				boot_error = 0;
+				break;
+			}
+			udelay(100);
+			schedule();
+		}
+	}
+
+	if (!boot_error) {
+		/*
+		 * Wait till AP completes initial initialization
+		 */
+		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
 			/*
 			 * Allow other tasks to run while we wait for the
 			 * AP to come online. This also gives a chance
 			 * for the MTRR work(triggered by the AP coming online)
 			 * to be completed in the stop machine context.
 			 */
+			udelay(100);
 			schedule();
 		}
-
-		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
-			print_cpu_msr(&cpu_data(cpu));
-			pr_debug("CPU%d: has booted.\n", cpu);
-		} else {
-			boot_error = 1;
-			if (*trampoline_status == 0xA5A5A5A5)
-				/* trampoline started but...? */
-				pr_err("CPU%d: Stuck ??\n", cpu);
-			else
-				/* trampoline code not run */
-				pr_err("CPU%d: Not responding\n", cpu);
-			if (apic->inquire_remote_apic)
-				apic->inquire_remote_apic(apicid);
-		}
-	}
-
-	if (boot_error) {
-		/* Try to put things back the way they were before ... */
-		numa_remove_cpu(cpu); /* was set by numa_add_cpu */
-
-		/* was set by do_boot_cpu() */
-		cpumask_clear_cpu(cpu, cpu_callout_mask);
-
-		/* was set by cpu_init() */
-		cpumask_clear_cpu(cpu, cpu_initialized_mask);
-
-		set_cpu_present(cpu, false);
-		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
 	}
 
 	/* mark "stuck" area as not stuck */
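
The master-side wait is now bounded and two-phased: up to 10 seconds for the AP to show up in cpu_initialized_mask, and only then is cpu_callout_mask set. The jiffies arithmetic (timeout = jiffies + 10*HZ; while (time_before(jiffies, timeout))) maps onto an ordinary monotonic-clock deadline in userspace. Here is a sketch of that bounded-wait pattern under those assumptions; the flag, delays, and helper are stand-ins, not kernel code.

/* Userspace sketch of the bounded "wait for AP" loop; flag is a stand-in. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static atomic_int cpu_initialized;

static bool wait_for_flag(atomic_int *flag, int timeout_sec)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += timeout_sec;		/* jiffies + 10*HZ analogue */

	for (;;) {
		if (atomic_load(flag))
			return true;		/* AP responded: set callout */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec &&
		     now.tv_nsec >= deadline.tv_nsec))
			return false;		/* give up: report boot_error */
		usleep(100);			/* udelay(100)+schedule() analogue */
	}
}

int main(void)
{
	/* Nothing ever sets the flag here, so this demonstrates the timeout. */
	if (!wait_for_flag(&cpu_initialized, 1))
		puts("do_boot_cpu would fail: AP never initialized");
	return 0;
}

Because boot_error starts at -1 and is only cleared once the AP is seen, a silent AP now yields a clean failure from do_boot_cpu() instead of a half-onlined CPU.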

@@ -921,7 +881,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
 	err = do_boot_cpu(apicid, cpu, tidle);
 	if (err) {
-		pr_debug("do_boot_cpu failed %d\n", err);
+		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
 		return -EIO;
 	}
 

arch/x86/platform/efi/efi.c
@@ -919,6 +919,9 @@ static void __init save_runtime_map(void)
 	void *tmp, *p, *q = NULL;
 	int count = 0;
 
+	if (efi_enabled(EFI_OLD_MEMMAP))
+		return;
+
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
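
save_runtime_map() now bails out early when the firmware is mapped the old ioremap-style way, since the exported runtime map is only meaningful with the new-style mappings that kexec relies on. A tiny standalone model of the efi_enabled()-style facility-bit guard follows; the enum values and flag word are invented for illustration, only the guard pattern mirrors the patch.

/* Minimal model of the efi_enabled() facility-bit guard; bits invented. */
#include <stdio.h>

enum efi_facility { EFI_BOOT, EFI_OLD_MEMMAP, EFI_RUNTIME_SERVICES };

static unsigned long efi_flags;

static int efi_enabled(enum efi_facility f)
{
	return (efi_flags >> f) & 1;
}

static void save_runtime_map(void)
{
	/* With the old-style map, the saved map would be wrong: skip it. */
	if (efi_enabled(EFI_OLD_MEMMAP))
		return;
	puts("exporting runtime map for kexec");
}

int main(void)
{
	efi_flags |= 1UL << EFI_OLD_MEMMAP;	/* e.g. booted with efi=old_map */
	save_runtime_map();			/* silently skips the export */
	return 0;
}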