Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: avoid section mismatch involving arch_register_cpu
  x86: fixes for lookup_address args
  x86: fix sparse warnings in cpu/common.c
  x86: make early_console static in early_printk.c
  x86: remove unneeded round_up
  x86: fix section mismatch warning in kernel/pci-calgary
  x86: fix section mismatch warning in acpi/boot.c
  x86: fix section mismatch warnings when referencing notifiers
  x86: silence section mismatch warning in smpboot_64.c
  x86: fix comments in vmlinux_64.lds
  x86_64: make bootmap_start page align v6
  x86_64: add debug name for early_res

commit 3e01dfce13

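Several of the section-mismatch fixes pulled in below share one pattern: a CPU-notifier callback
annotated __cpuinit lives in a section that can be discarded after boot, so the notifier_block
that references it is marked __refdata (or the caller is wrapped in a __ref function, as in the
acpi_map_lsapic() change) so modpost knows the cross-section reference is intentional. The
following is a minimal sketch of that pattern for illustration only, not code from this merge;
the example_* identifiers are made up.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

/* Sketch only: example_* names are hypothetical, not part of this merge. */
static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	/* react to CPU_ONLINE / CPU_DEAD transitions here */
	return NOTIFY_OK;
}

/*
 * __refdata: this object intentionally points into __cpuinit text, so
 * modpost should not warn about the section mismatch.
 */
static struct notifier_block __refdata example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

Registering it with register_hotcpu_notifier(&example_cpu_notifier) at init time would wire the
callback up in the usual way.
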
@@ -3,7 +3,7 @@ OUTPUT_ARCH(i386:x86-64)
 ENTRY(startup_64)
 SECTIONS
 {
-	/* Be careful parts of head_64.S assume startup_64 is at
+	/* Be careful parts of head_64.S assume startup_32 is at
 	 * address 0.
 	 */
 	. = 0;

@@ -496,7 +496,8 @@ EXPORT_SYMBOL(acpi_register_gsi);
  * ACPI based hotplug support for CPU
  */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
-int acpi_map_lsapic(acpi_handle handle, int *pcpu)
+
+static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;

@@ -551,6 +552,11 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	return 0;
 }
 
+/* wrapper to silence section mismatch warning */
+int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+{
+	return _acpi_map_lsapic(handle, pcpu);
+}
 EXPORT_SYMBOL(acpi_map_lsapic);
 
 int acpi_unmap_lsapic(int cpu)

@@ -258,10 +258,10 @@ static int __cpuinit have_cpuid_p(void)
 void __init cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
-	cpuid(0x00000000, &c->cpuid_level,
-	      (int *)&c->x86_vendor_id[0],
-	      (int *)&c->x86_vendor_id[8],
-	      (int *)&c->x86_vendor_id[4]);
+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+	      (unsigned int *)&c->x86_vendor_id[0],
+	      (unsigned int *)&c->x86_vendor_id[8],
+	      (unsigned int *)&c->x86_vendor_id[4]);
 
 	c->x86 = 4;
 	if (c->cpuid_level >= 0x00000001) {

@@ -283,7 +283,7 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
-	int ebx;
+	unsigned int ebx;
 
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 	if (have_cpuid_p()) {

@@ -343,14 +343,14 @@ static void __init early_cpu_detect(void)
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
-	int ebx;
+	unsigned int ebx;
 
 	if (have_cpuid_p()) {
 		/* Get vendor name */
-		cpuid(0x00000000, &c->cpuid_level,
-		      (int *)&c->x86_vendor_id[0],
-		      (int *)&c->x86_vendor_id[8],
-		      (int *)&c->x86_vendor_id[4]);
+		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+		      (unsigned int *)&c->x86_vendor_id[0],
+		      (unsigned int *)&c->x86_vendor_id[8],
+		      (unsigned int *)&c->x86_vendor_id[4]);
 
 		get_cpu_vendor(c, 0);
 		/* Initialize the standard set of capabilities */

@@ -170,7 +170,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
 	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
+static struct notifier_block __refdata cpuid_class_cpu_notifier =
 {
 	.notifier_call = cpuid_class_cpu_callback,
 };

@@ -54,30 +54,33 @@ static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
 
 struct early_res {
 	unsigned long start, end;
+	char name[16];
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
-	{ 0, PAGE_SIZE },	/* BIOS data page */
+	{ 0, PAGE_SIZE, "BIOS data page" },	/* BIOS data page */
 #ifdef CONFIG_SMP
-	{ SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE },
+	{ SMP_TRAMPOLINE_BASE, SMP_TRAMPOLINE_BASE + 2*PAGE_SIZE, "SMP_TRAMPOLINE" },
 #endif
 	{}
 };
 
-void __init reserve_early(unsigned long start, unsigned long end)
+void __init reserve_early(unsigned long start, unsigned long end, char *name)
 {
 	int i;
 	struct early_res *r;
 	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
 		r = &early_res[i];
 		if (end > r->start && start < r->end)
-			panic("Overlapping early reservations %lx-%lx to %lx-%lx\n",
-			      start, end, r->start, r->end);
+			panic("Overlapping early reservations %lx-%lx %s to %lx-%lx %s\n",
+			      start, end - 1, name?name:"", r->start, r->end - 1, r->name);
 	}
 	if (i >= MAX_EARLY_RES)
 		panic("Too many early reservations");
 	r = &early_res[i];
 	r->start = start;
 	r->end = end;
+	if (name)
+		strncpy(r->name, name, sizeof(r->name) - 1);
 }
 
 void __init early_res_to_bootmem(void)

@@ -85,6 +88,8 @@ void __init early_res_to_bootmem(void)
 	int i;
 	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
 		struct early_res *r = &early_res[i];
+		printk(KERN_INFO "early res: %d [%lx-%lx] %s\n", i,
+			r->start, r->end - 1, r->name);
 		reserve_bootmem_generic(r->start, r->end - r->start);
 	}
 }

@@ -166,12 +171,13 @@ int __init e820_all_mapped(unsigned long start, unsigned long end,
 }
 
 /*
- * Find a free area in a specific range.
+ * Find a free area with specified alignment in a specific range.
  */
 unsigned long __init find_e820_area(unsigned long start, unsigned long end,
-				    unsigned size)
+				    unsigned size, unsigned long align)
 {
 	int i;
+	unsigned long mask = ~(align - 1);
 
 	for (i = 0; i < e820.nr_map; i++) {
 		struct e820entry *ei = &e820.map[i];

@@ -185,7 +191,8 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end,
 			continue;
 		while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
 			;
-		last = PAGE_ALIGN(addr) + size;
+		addr = (addr + align - 1) & mask;
+		last = addr + size;
 		if (last > ei->addr + ei->size)
 			continue;
 		if (last > end)

@@ -193,7 +193,7 @@ static struct console simnow_console = {
 };
 
 /* Direct interface for emergencies */
-struct console *early_console = &early_vga_console;
+static struct console *early_console = &early_vga_console;
 static int early_console_initialized = 0;
 
 void early_printk(const char *fmt, ...)

@@ -44,7 +44,7 @@ static void __init early_mapping_set_exec(unsigned long start,
 					  int executable)
 {
 	pte_t *kpte;
-	int level;
+	unsigned int level;
 
 	while (start < end) {
 		kpte = lookup_address((unsigned long)__va(start), &level);

@@ -75,7 +75,7 @@ static __init void reserve_ebda(void)
 	if (ebda_size > 64*1024)
 		ebda_size = 64*1024;
 
-	reserve_early(ebda_addr, ebda_addr + ebda_size);
+	reserve_early(ebda_addr, ebda_addr + ebda_size, "EBDA");
 }
 
 void __init x86_64_start_kernel(char * real_mode_data)

@@ -105,14 +105,14 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	pda_init(0);
 	copy_bootdata(__va(real_mode_data));
 
-	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end));
+	reserve_early(__pa_symbol(&_text), __pa_symbol(&_end), "TEXT DATA BSS");
 
 	/* Reserve INITRD */
 	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
 		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
 		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
 		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
-		reserve_early(ramdisk_image, ramdisk_end);
+		reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
 	}
 
 	reserve_ebda();

@@ -797,7 +797,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata mc_cpu_notifier = {
+static struct notifier_block __refdata mc_cpu_notifier = {
 	.notifier_call = mc_cpu_callback,
 };
 

@@ -168,7 +168,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
 	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata msr_class_cpu_notifier = {
+static struct notifier_block __refdata msr_class_cpu_notifier = {
 	.notifier_call = msr_class_cpu_callback,
 };
 

@@ -1006,7 +1006,7 @@ static void __init calgary_set_split_completion_timeout(void __iomem *bbar,
 	readq(target); /* flush */
 }
 
-static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
+static void __init calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
 {
 	unsigned char busnum = dev->bus->number;
 	void __iomem *bbar = tbl->bbar;

@@ -1022,7 +1022,7 @@ static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
 	writel(cpu_to_be32(val), target);
 }
 
-static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
+static void __init calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev)
 {
 	unsigned char busnum = dev->bus->number;
 

@@ -182,7 +182,8 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 	unsigned long bootmap_size, bootmap;
 
 	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
+	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size,
+				 PAGE_SIZE);
 	if (bootmap == -1L)
 		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
 	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);

@@ -1019,7 +1019,7 @@ static void remove_siblinginfo(int cpu)
 	cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
-void remove_cpu_from_maps(void)
+static void __ref remove_cpu_from_maps(void)
 {
 	int cpu = smp_processor_id();
 

@@ -57,11 +57,10 @@ void arch_unregister_cpu(int num)
 }
 EXPORT_SYMBOL(arch_unregister_cpu);
 #else
-int arch_register_cpu(int num)
+static int __init arch_register_cpu(int num)
 {
 	return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
 }
-EXPORT_SYMBOL(arch_register_cpu);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 static int __init topology_init(void)

@@ -382,7 +382,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 
 #ifdef CONFIG_X86_PAE
 	if (error_code & PF_INSTR) {
-		int level;
+		unsigned int level;
 		pte_t *pte = lookup_address(address, &level);
 
 		if (pte && pte_present(*pte) && !pte_exec(*pte))

@@ -354,17 +354,10 @@ static void __init find_early_table_space(unsigned long end)
 	 * need roughly 0.5KB per GB.
 	 */
 	start = 0x8000;
-	table_start = find_e820_area(start, end, tables);
+	table_start = find_e820_area(start, end, tables, PAGE_SIZE);
 	if (table_start == -1UL)
 		panic("Cannot find space for the kernel page tables");
 
-	/*
-	 * When you have a lot of RAM like 256GB, early_table will not fit
-	 * into 0x8000 range, find_e820_area() will find area after kernel
-	 * bss but the table_start is not page aligned, so need to round it
-	 * up to avoid overlap with bss:
-	 */
-	table_start = round_up(table_start, PAGE_SIZE);
 	table_start >>= PAGE_SHIFT;
 	table_end = table_start;
 

@@ -420,7 +413,9 @@ void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 	mmu_cr4_features = read_cr4();
 	__flush_tlb_all();
 
-	reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+	if (!after_bootmem)
+		reserve_early(table_start << PAGE_SHIFT,
+				 table_end << PAGE_SHIFT, "PGTABLE");
 }
 
 #ifndef CONFIG_NUMA

@@ -75,7 +75,8 @@ static int ioremap_change_attr(unsigned long paddr, unsigned long size,
 {
 	unsigned long vaddr = (unsigned long)__va(paddr);
 	unsigned long nrpages = size >> PAGE_SHIFT;
-	int err, level;
+	unsigned int level;
+	int err;
 
 	/* No change for pages after the last mapping */
 	if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))

@@ -84,26 +84,24 @@ static int __init populate_memnodemap(const struct bootnode *nodes,
 
 static int __init allocate_cachealigned_memnodemap(void)
 {
-	unsigned long pad, pad_addr;
+	unsigned long addr;
 
 	memnodemap = memnode.embedded_map;
 	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
 		return 0;
 
-	pad = L1_CACHE_BYTES - 1;
-	pad_addr = 0x8000;
-	nodemap_size = pad + sizeof(s16) * memnodemapsize;
-	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
-				      nodemap_size);
+	addr = 0x8000;
+	nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
+	nodemap_addr = find_e820_area(addr, end_pfn<<PAGE_SHIFT,
+				      nodemap_size, L1_CACHE_BYTES);
 	if (nodemap_addr == -1UL) {
 		printk(KERN_ERR
 		       "NUMA: Unable to allocate Memory to Node hash map\n");
 		nodemap_addr = nodemap_size = 0;
 		return -1;
 	}
-	pad_addr = (nodemap_addr + pad) & ~pad;
-	memnodemap = phys_to_virt(pad_addr);
-	reserve_early(nodemap_addr, nodemap_addr + nodemap_size);
+	memnodemap = phys_to_virt(nodemap_addr);
+	reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");
 
 	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
 	       nodemap_addr, nodemap_addr + nodemap_size);

@@ -164,15 +162,16 @@ int early_pfn_to_nid(unsigned long pfn)
 }
 
 static void * __init early_node_mem(int nodeid, unsigned long start,
-				    unsigned long end, unsigned long size)
+				    unsigned long end, unsigned long size,
+				    unsigned long align)
 {
-	unsigned long mem = find_e820_area(start, end, size);
+	unsigned long mem = find_e820_area(start, end, size, align);
 	void *ptr;
 
 	if (mem != -1L)
 		return __va(mem);
-	ptr = __alloc_bootmem_nopanic(size,
-				      SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
+
+	ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
 	if (ptr == NULL) {
 		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
 		       size, nodeid);

@@ -198,7 +197,8 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
 	start_pfn = start >> PAGE_SHIFT;
 	end_pfn = end >> PAGE_SHIFT;
 
-	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
+	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
+					   SMP_CACHE_BYTES);
 	if (node_data[nodeid] == NULL)
 		return;
 	nodedata_phys = __pa(node_data[nodeid]);

@@ -211,8 +211,12 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
 	/* Find a place for the bootmem map */
 	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+	/*
+	 * SMP_CAHCE_BYTES could be enough, but init_bootmem_node like
+	 * to use that to align to PAGE_SIZE
+	 */
 	bootmap = early_node_mem(nodeid, bootmap_start, end,
-				 bootmap_pages<<PAGE_SHIFT);
+				 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
 	if (bootmap == NULL) {
 		if (nodedata_phys < start || nodedata_phys >= end)
 			free_bootmem((unsigned long)node_data[nodeid],

@@ -42,7 +42,7 @@ static __init int print_split(struct split_state *s)
 	s->max_exec = 0;
 	for (i = 0; i < max_pfn_mapped; ) {
 		unsigned long addr = (unsigned long)__va(i << PAGE_SHIFT);
-		int level;
+		unsigned int level;
 		pte_t *pte;
 
 		pte = lookup_address(addr, &level);

@@ -106,7 +106,7 @@ static __init int exercise_pageattr(void)
 	unsigned long *bm;
 	pte_t *pte, pte0;
 	int failed = 0;
-	int level;
+	unsigned int level;
 	int i, k;
 	int err;
 

@@ -10,8 +10,9 @@
 struct x86_cpu {
 	struct cpu cpu;
 };
-extern int arch_register_cpu(int num);
+
 #ifdef CONFIG_HOTPLUG_CPU
+extern int arch_register_cpu(int num);
 extern void arch_unregister_cpu(int);
 #endif
 

@@ -15,7 +15,7 @@
 
 #ifndef __ASSEMBLY__
 extern unsigned long find_e820_area(unsigned long start, unsigned long end,
-			unsigned size);
+			unsigned size, unsigned long align);
 extern void add_memory_region(unsigned long start, unsigned long size,
 			int type);
 extern void setup_memory_region(void);

@@ -41,7 +41,7 @@ extern void finish_e820_parsing(void);
 extern struct e820map e820;
 extern void update_e820(void);
 
-extern void reserve_early(unsigned long start, unsigned long end);
+extern void reserve_early(unsigned long start, unsigned long end, char *name);
 extern void early_res_to_bootmem(void);
 
 #endif/*!__ASSEMBLY__*/
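
The align argument threaded through find_e820_area(), early_node_mem() and the bootmap
allocation above is applied with the usual power-of-two round-up, addr = (addr + align - 1)
& ~(align - 1). A standalone sketch of that arithmetic, using a hypothetical align_up()
helper that is not kernel code:

#include <stdio.h>

/* Round addr up to the next multiple of align; align must be a power of two. */
static unsigned long align_up(unsigned long addr, unsigned long align)
{
	unsigned long mask = ~(align - 1);

	return (addr + align - 1) & mask;
}

int main(void)
{
	/* 0x8001 rounded up to a 4 KiB boundary becomes 0x9000. */
	printf("%#lx\n", align_up(0x8001UL, 0x1000UL));
	return 0;
}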