x86: Move memory_setup to x86_init_ops
memory_setup is overridden by x86_quirks and by the paravirt code, via a mix of weak functions and quirks. Unify the whole mess and make it an unconditional x86_init_ops function, which defaults to the standard function and can be overridden by the early platform code. Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
816c25e7d4
commit
6b18ae3e2f
|
@@ -126,8 +126,6 @@ extern void e820_reserve_resources(void);
|
|||
extern void e820_reserve_resources_late(void);
|
||||
extern void setup_memory_map(void);
|
||||
extern char *default_machine_specific_memory_setup(void);
|
||||
extern char *machine_specific_memory_setup(void);
|
||||
extern char *memory_setup(void);
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
|
|
@@ -81,7 +81,6 @@ struct pv_init_ops {
|
|||
|
||||
/* Basic arch-specific setup */
|
||||
void (*arch_setup)(void);
|
||||
char *(*memory_setup)(void);
|
||||
void (*post_allocator_init)(void);
|
||||
|
||||
/* Print a banner to identify the environment */
|
||||
|
|
|
@@ -22,7 +22,6 @@ struct x86_quirks {
|
|||
int (*arch_pre_intr_init)(void);
|
||||
int (*arch_intr_init)(void);
|
||||
int (*arch_trap_init)(void);
|
||||
char * (*arch_memory_setup)(void);
|
||||
int (*mach_get_smp_config)(unsigned int early);
|
||||
int (*mach_find_smp_config)(unsigned int reserve);
|
||||
|
||||
|
|
|
@@ -7,12 +7,14 @@
|
|||
* @reserve_resources: reserve the standard resources for the
|
||||
* platform
|
||||
* @reserve_ebda_region: reserve the extended bios data area
|
||||
* @memory_setup: platform specific memory setup
|
||||
*
|
||||
*/
|
||||
struct x86_init_resources {
|
||||
void (*probe_roms)(void);
|
||||
void (*reserve_resources)(void);
|
||||
void (*reserve_ebda_region)(void);
|
||||
char *(*memory_setup)(void);
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@@ -260,7 +260,6 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
|
|||
.arch_pre_time_init = numaq_pre_time_init,
|
||||
.arch_time_init = NULL,
|
||||
.arch_pre_intr_init = NULL,
|
||||
.arch_memory_setup = NULL,
|
||||
.arch_intr_init = NULL,
|
||||
.arch_trap_init = NULL,
|
||||
.mach_get_smp_config = NULL,
|
||||
|
|
|
@@ -1455,28 +1455,11 @@ char *__init default_machine_specific_memory_setup(void)
|
|||
return who;
|
||||
}
|
||||
|
||||
char *__init __attribute__((weak)) machine_specific_memory_setup(void)
|
||||
{
|
||||
if (x86_quirks->arch_memory_setup) {
|
||||
char *who = x86_quirks->arch_memory_setup();
|
||||
|
||||
if (who)
|
||||
return who;
|
||||
}
|
||||
return default_machine_specific_memory_setup();
|
||||
}
|
||||
|
||||
/* Overridden in paravirt.c if CONFIG_PARAVIRT */
|
||||
char * __init __attribute__((weak)) memory_setup(void)
|
||||
{
|
||||
return machine_specific_memory_setup();
|
||||
}
|
||||
|
||||
void __init setup_memory_map(void)
|
||||
{
|
||||
char *who;
|
||||
|
||||
who = memory_setup();
|
||||
who = x86_init.resources.memory_setup();
|
||||
memcpy(&e820_saved, &e820, sizeof(struct e820map));
|
||||
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
|
||||
e820_print_map(who);
|
||||
|
|
|
@@ -60,11 +60,6 @@ static void __init default_banner(void)
|
|||
pv_info.name);
|
||||
}
|
||||
|
||||
char *memory_setup(void)
|
||||
{
|
||||
return pv_init_ops.memory_setup();
|
||||
}
|
||||
|
||||
/* Simple instruction patching code. */
|
||||
#define DEF_NATIVE(ops, name, code) \
|
||||
extern const char start_##ops##_##name[], end_##ops##_##name[]; \
|
||||
|
@@ -322,7 +317,6 @@ struct pv_init_ops pv_init_ops = {
|
|||
.patch = native_patch,
|
||||
.banner = default_banner,
|
||||
.arch_setup = paravirt_nop,
|
||||
.memory_setup = machine_specific_memory_setup,
|
||||
};
|
||||
|
||||
struct pv_time_ops pv_time_ops = {
|
||||
|
|
|
@@ -239,7 +239,6 @@ static int visws_trap_init(void);
|
|||
static struct x86_quirks visws_x86_quirks __initdata = {
|
||||
.arch_time_init = visws_time_init,
|
||||
.arch_pre_intr_init = visws_pre_intr_init,
|
||||
.arch_memory_setup = visws_memory_setup,
|
||||
.arch_intr_init = NULL,
|
||||
.arch_trap_init = visws_trap_init,
|
||||
.mach_get_smp_config = visws_get_smp_config,
|
||||
|
@@ -263,6 +262,8 @@ void __init visws_early_detect(void)
|
|||
*/
|
||||
x86_quirks = &visws_x86_quirks;
|
||||
|
||||
x86_init.resources.memory_setup = visws_memory_setup;
|
||||
|
||||
/*
|
||||
* Install reboot quirks:
|
||||
*/
|
||||
|
|
|
@@ -7,6 +7,7 @@
|
|||
|
||||
#include <asm/bios_ebda.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/e820.h>
|
||||
|
||||
void __cpuinit x86_init_noop(void) { }
|
||||
|
||||
|
@@ -20,5 +21,6 @@ struct __initdata x86_init_ops x86_init = {
|
|||
.probe_roms = x86_init_noop,
|
||||
.reserve_resources = reserve_standard_io_resources,
|
||||
.reserve_ebda_region = reserve_ebda_region,
|
||||
.memory_setup = default_machine_specific_memory_setup,
|
||||
},
|
||||
};
|
||||
|
|
|
@@ -1270,7 +1270,6 @@ __init void lguest_init(void)
|
|||
pv_irq_ops.safe_halt = lguest_safe_halt;
|
||||
|
||||
/* Setup operations */
|
||||
pv_init_ops.memory_setup = lguest_memory_setup;
|
||||
pv_init_ops.patch = lguest_patch;
|
||||
|
||||
/* Intercepts of various CPU instructions */
|
||||
|
@@ -1325,6 +1324,8 @@ __init void lguest_init(void)
|
|||
pv_time_ops.time_init = lguest_time_init;
|
||||
pv_time_ops.get_tsc_khz = lguest_tsc_khz;
|
||||
|
||||
x86_init.resources.memory_setup = lguest_memory_setup;
|
||||
|
||||
/*
|
||||
* Now is a good time to look at the implementations of these functions
|
||||
* before returning to the rest of lguest_init().
|
||||
|
|
|
@@ -841,7 +841,6 @@ static const struct pv_init_ops xen_init_ops __initdata = {
|
|||
.patch = xen_patch,
|
||||
|
||||
.banner = xen_banner,
|
||||
.memory_setup = xen_memory_setup,
|
||||
.arch_setup = xen_arch_setup,
|
||||
.post_allocator_init = xen_post_allocator_init,
|
||||
};
|
||||
|
@@ -982,6 +981,8 @@ asmlinkage void __init xen_start_kernel(void)
|
|||
pv_apic_ops = xen_apic_ops;
|
||||
pv_mmu_ops = xen_mmu_ops;
|
||||
|
||||
x86_init.resources.memory_setup = xen_memory_setup;
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
* Setup percpu state. We only need to do this for 64-bit
|
||||
|
|
Loading…
Reference in New Issue