Merge branch 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Peter Anvin:
 "This series fixes a couple of build failures, and fixes MTRR cleanup
  and memory setup on very specific memory maps.

  Finally, it fixes triggering backtraces on all CPUs, which was
  inadvertently disabled on x86."

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/efi: Fix dummy variable buffer allocation
  x86: Fix trigger_all_cpu_backtrace() implementation
  x86: Fix section mismatch on load_ucode_ap
  x86: fix build error and kconfig for ia32_emulation and binfmt
  range: Do not add new blank slot with add_range_with_merge
  x86, mtrr: Fix original mtrr range get for mtrr_cleanup
commit f71194a7d4
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt"
 config IA32_EMULATION
 	bool "IA32 Emulation"
 	depends on X86_64
+	select BINFMT_ELF
 	select COMPAT_BINFMT_ELF
 	select HAVE_UID16
 	---help---
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
 
+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_X86_IRQ_H */
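For context on this hunk and the matching removal from <asm/nmi.h> further down: the generic helper in include/linux/nmi.h only calls the arch hook when the arch_trigger_all_cpu_backtrace macro is visible at that point, and that header pulls in <asm/irq.h>, not <asm/nmi.h>, so the declaration has to live here for the x86 implementation to be picked up at all. An abridged sketch of the generic side, as I recall the 3.10-era header (illustrative, not part of this diff):

/* abridged from include/linux/nmi.h (3.10 era), for illustration only */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace();
	return true;
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
#endif

With the define hidden behind <asm/nmi.h>, the #else branch was always chosen and trigger_all_cpu_backtrace() silently did nothing on x86.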
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern __init void load_ucode_ap(void);
+extern void __cpuinit load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline __init void load_ucode_ap(void) {}
+static inline void __cpuinit load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
 	return 0;
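The annotation change matters because load_ucode_ap() runs on the secondary-CPU bring-up path, which can execute again on CPU hotplug long after init memory has been freed; only the BSP-side loader is a true one-shot __init function. A minimal sketch of the relationship, with a hypothetical caller name standing in for the real x86 CPU-init call site (not part of this diff):

/* Sketch only: the AP bring-up path is __cpuinit, so everything it calls
 * must survive the freeing of .init.text.  Marking the callee __init is
 * what modpost flags as a section mismatch. */
void __cpuinit secondary_cpu_setup(void)	/* hypothetical name */
{
	load_ucode_ap();	/* must not live in .init.text */
}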
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
 		void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 #define NMI_FLAG_FIRST		1
 
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -9,6 +9,7 @@
  *
  */
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 #include <linux/cpumask.h>
 #include <linux/kdebug.h>
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
 	if (mtrr_tom2)
 		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
 
-	nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
 	/*
 	 * [0, 1M) should always be covered by var mtrr with WB
 	 * and fixed mtrrs should take effect before var mtrr for it:
 	 */
-	nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+	nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
 					1ULL<<(20 - PAGE_SHIFT));
-	/* Sort the ranges: */
-	sort_range(range, nr_range);
+	/* add from var mtrr at last */
+	nr_range = x86_get_mtrr_mem_range(range, nr_range,
+					  x_remove_base, x_remove_size);
 
 	range_sums = sum_ranges(range, nr_range);
 	printk(KERN_INFO "total RAM covered: %ldM\n",
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -1069,7 +1069,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
 		 * that by attempting to use more space than is available.
 		 */
 		unsigned long dummy_size = remaining_size + 1024;
-		void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
+		void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
+
+		if (!dummy)
+			return EFI_OUT_OF_RESOURCES;
 
 		status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
 					  EFI_VARIABLE_NON_VOLATILE |
@@ -1089,6 +1092,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
 					 0, dummy);
 		}
 
+		kfree(dummy);
+
 		/*
 		 * The runtime code may now have triggered a garbage collection
 		 * run, so check the variable info again
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sort.h>
-
+#include <linux/string.h>
 #include <linux/range.h>
 
 int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
@@ -32,9 +32,8 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
 	if (start >= end)
 		return nr_range;
 
-	/* Try to merge it with old one: */
+	/* get new start/end: */
 	for (i = 0; i < nr_range; i++) {
-		u64 final_start, final_end;
 		u64 common_start, common_end;
 
 		if (!range[i].end)
@@ -45,14 +44,16 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
 		if (common_start > common_end)
 			continue;
 
-		final_start = min(range[i].start, start);
-		final_end = max(range[i].end, end);
+		/* new start/end, will add it back at last */
+		start = min(range[i].start, start);
+		end = max(range[i].end, end);
 
-		/* clear it and add it back for further merge */
-		range[i].start = 0;
-		range[i].end = 0;
-		return add_range_with_merge(range, az, nr_range,
-					    final_start, final_end);
+		memmove(&range[i], &range[i + 1],
+			(nr_range - (i + 1)) * sizeof(range[i]));
+		range[nr_range - 1].start = 0;
+		range[nr_range - 1].end = 0;
+		nr_range--;
+		i--;
 	}
 
 	/* Need to add it: */
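What the range.c change buys: the old code zeroed the overlapped slot and recursed through add_range(), which appended the merged result at the end and left a blank hole in the middle of the table while still growing nr_range; the new loop folds every overlap into start/end, compacts the array with memmove(), and appends the merged range exactly once. A stand-alone user-space sketch of the new behaviour (the struct range layout, the min/max macros and the simplified add_range() below are assumptions for illustration, not the kernel's exact definitions):

/* Sketch only: user-space rendition of the post-fix add_range_with_merge(). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef uint64_t u64;
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

struct range { u64 start, end; };

static int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
{
	if (start >= end || nr_range >= az)
		return nr_range;
	range[nr_range].start = start;
	range[nr_range].end = end;
	return nr_range + 1;
}

static int add_range_with_merge(struct range *range, int az, int nr_range,
				u64 start, u64 end)
{
	int i;

	if (start >= end)
		return nr_range;

	/* get new start/end: */
	for (i = 0; i < nr_range; i++) {
		u64 common_start, common_end;

		if (!range[i].end)
			continue;

		common_start = max(range[i].start, start);
		common_end = min(range[i].end, end);
		if (common_start > common_end)
			continue;

		/* fold the overlap into start/end, compact the table */
		start = min(range[i].start, start);
		end = max(range[i].end, end);

		memmove(&range[i], &range[i + 1],
			(nr_range - (i + 1)) * sizeof(range[i]));
		range[nr_range - 1].start = 0;
		range[nr_range - 1].end = 0;
		nr_range--;
		i--;
	}

	/* Need to add it: */
	return add_range(range, az, nr_range, start, end);
}

int main(void)
{
	struct range r[8] = { { 0, 0 } };
	int n = 0;

	n = add_range_with_merge(r, 8, n, 0, 0x100);
	n = add_range_with_merge(r, 8, n, 0x200, 0x300);
	n = add_range_with_merge(r, 8, n, 0x80, 0x250);	/* overlaps both */

	for (int i = 0; i < n; i++)
		printf("range %d: [%#llx, %#llx)\n", i,
		       (unsigned long long)r[i].start,
		       (unsigned long long)r[i].end);
	return 0;
}

Compiled with gcc, this prints a single dense entry, "range 0: [0, 0x300)", where the pre-fix recursion would have left zeroed placeholder slots behind while still counting them in nr_range.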