[PATCH] hotplug CPU: clean up hotcpu_notifier() use
There was lots of #ifdef noise in the kernel due to hotcpu_notifier(fn, prio)
not correctly marking 'fn' as used in the !HOTPLUG_CPU case, and thus
generating compiler warnings of unused symbols, hence forcing people to add
#ifdefs.  The compiler can skip truly unused functions just fine:

    text    data     bss     dec     hex filename
 1624412  728710 3674856 6027978  5bfaca vmlinux.before
 1624412  728710 3674856 6027978  5bfaca vmlinux.after

[akpm@osdl.org: topology.c fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a38a44c1a9
commit 0231606785
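For illustration only, a minimal standalone sketch of the pattern this patch relies on, using hypothetical names (demo.c, my_cpu_callback, register_cb; CONFIG_HOTPLUG_CPU is passed on the compiler command line rather than coming from Kconfig). The do { (void)(fn); } while (0) stub references the callback, so the compiler no longer warns about an otherwise unused static function, yet the expression generates no code and the optimizer can still drop the function body:

/*
 * Standalone sketch, not kernel code; my_cpu_callback and register_cb()
 * are invented names.
 * Build: gcc -Wall -O2 demo.c                        (hotplug disabled)
 *        gcc -Wall -O2 -DCONFIG_HOTPLUG_CPU demo.c   (hotplug enabled)
 */
#include <stdio.h>

typedef int (*cb_t)(unsigned long action, void *hcpu);

#ifdef CONFIG_HOTPLUG_CPU
static cb_t registered_cb;
static void register_cb(cb_t fn, int pri) { (void)pri; registered_cb = fn; }
# define hotcpu_notifier(fn, pri)  register_cb(fn, pri)
#else
/* Old stub: do { } while (0) -- 'fn' was never referenced, so gcc warned
 * "'my_cpu_callback' defined but not used" and callers wrapped the
 * callback in #ifdef CONFIG_HOTPLUG_CPU.
 * New stub: evaluate 'fn' so it counts as used; no code is emitted for
 * the expression and the unreferenced function can still be discarded. */
# define hotcpu_notifier(fn, pri)  do { (void)(fn); } while (0)
#endif

static int my_cpu_callback(unsigned long action, void *hcpu)
{
        (void)hcpu;
        printf("cpu event %lu\n", action);
        return 0;
}

int main(void)
{
        hotcpu_notifier(my_cpu_callback, 0);    /* no #ifdef needed here */
#ifdef CONFIG_HOTPLUG_CPU
        if (registered_cb)
                registered_cb(1UL, NULL);       /* simulate a hotplug event */
#endif
        return 0;
}

Built with -Wall, both configurations should compile without warnings; without -DCONFIG_HOTPLUG_CPU the callback is expected to be optimized away, which is the same effect the vmlinux size comparison above demonstrates.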
@@ -116,7 +116,6 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
         return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
 {
         return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
@@ -153,7 +152,6 @@ static struct notifier_block thermal_throttle_cpu_notifier =
 {
         .notifier_call = thermal_throttle_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */

 static __init int thermal_throttle_init_device(void)
 {
@@ -167,7 +167,6 @@ static int cpuid_device_create(int i)
         return err;
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
         unsigned int cpu = (unsigned long)hcpu;
@@ -187,7 +186,6 @@ static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
 {
         .notifier_call = cpuid_class_cpu_callback,
 };
-#endif /* !CONFIG_HOTPLUG_CPU */

 static int __init cpuid_init(void)
 {
@@ -703,7 +703,6 @@ static struct sysdev_driver mc_sysdev_driver = {
         .resume = mc_sysdev_resume,
 };

-#ifdef CONFIG_HOTPLUG_CPU
 static __cpuinit int
 mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -726,7 +725,6 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 static struct notifier_block mc_cpu_notifier = {
         .notifier_call = mc_cpu_callback,
 };
-#endif

 static int __init microcode_init (void)
 {
@@ -250,7 +250,6 @@ static int msr_device_create(int i)
         return err;
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int msr_class_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
 {
@@ -271,7 +270,6 @@ static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
 {
         .notifier_call = msr_class_cpu_callback,
 };
-#endif

 static int __init msr_init(void)
 {
@@ -952,7 +952,6 @@ remove_palinfo_proc_entries(unsigned int hcpu)
         }
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int palinfo_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
 {
@@ -974,7 +973,6 @@ static struct notifier_block palinfo_cpu_notifier =
         .notifier_call = palinfo_cpu_callback,
         .priority = 0,
 };
-#endif

 static int __init
 palinfo_init(void)
@@ -575,7 +575,6 @@ static struct file_operations salinfo_data_fops = {
         .write   = salinfo_log_write,
 };

-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -620,7 +619,6 @@ static struct notifier_block salinfo_cpu_notifier =
         .notifier_call = salinfo_cpu_callback,
         .priority = 0,
 };
-#endif /* CONFIG_HOTPLUG_CPU */

 static int __init
 salinfo_init(void)
@@ -561,7 +561,6 @@ appldata_offline_cpu(int cpu)
         spin_unlock(&appldata_timer_lock);
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit
 appldata_cpu_notify(struct notifier_block *self,
                     unsigned long action, void *hcpu)
@@ -582,7 +581,6 @@ appldata_cpu_notify(struct notifier_block *self,
 static struct notifier_block appldata_nb = {
         .notifier_call = appldata_cpu_notify,
 };
-#endif

 /*
  * appldata_init()
@@ -641,7 +641,6 @@ static __cpuinit int mce_create_device(unsigned int cpu)
         return err;
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static void mce_remove_device(unsigned int cpu)
 {
         int i;
@@ -674,7 +673,6 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 static struct notifier_block mce_cpu_notifier = {
         .notifier_call = mce_cpu_callback,
 };
-#endif

 static __init int mce_init_device(void)
 {
@@ -551,7 +551,6 @@ out:
         return err;
 }

-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * let's be hotplug friendly.
  * in case of multiple core processors, the first core always takes ownership
@@ -594,12 +593,14 @@ static void threshold_remove_bank(unsigned int cpu, int bank)

         sprintf(name, "threshold_bank%i", bank);

+#ifdef CONFIG_SMP
         /* sibling symlink */
         if (shared_bank[bank] && b->blocks->cpu != cpu) {
                 sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
                 per_cpu(threshold_banks, cpu)[bank] = NULL;
                 return;
         }
+#endif

         /* remove all sibling symlinks before unregistering */
         for_each_cpu_mask(i, b->cpus) {
@@ -656,7 +657,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
 static struct notifier_block threshold_cpu_notifier = {
         .notifier_call = threshold_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */

 static __init int threshold_init_device(void)
 {
@@ -275,7 +275,6 @@ static void __cpuinit cpu_vsyscall_init(void *arg)
         vsyscall_set_cpu(raw_smp_processor_id());
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
@@ -284,7 +283,6 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
         smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
         return NOTIFY_DONE;
 }
-#endif

 static void __init map_vsyscall(void)
 {
@@ -3459,8 +3459,6 @@ static void blk_done_softirq(struct softirq_action *h)
         }
 }

-#ifdef CONFIG_HOTPLUG_CPU
-
 static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
                           void *hcpu)
 {
@@ -3486,8 +3484,6 @@ static struct notifier_block __devinitdata blk_cpu_notifier = {
         .notifier_call  = blk_cpu_notify,
 };

-#endif /* CONFIG_HOTPLUG_CPU */
-
 /**
  * blk_complete_request - end I/O on a request
  * @req:      the request being processed
@@ -108,7 +108,6 @@ static int __cpuinit topology_add_dev(unsigned int cpu)
         return rc;
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static void __cpuinit topology_remove_dev(unsigned int cpu)
 {
         struct sys_device *sys_dev = get_cpu_sysdev(cpu);
@@ -136,7 +135,6 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
         }
         return rc ? NOTIFY_BAD : NOTIFY_OK;
 }
-#endif

 static int __cpuinit topology_sysfs_init(void)
 {
@@ -1537,7 +1537,6 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);

-#ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
 {
@@ -1577,7 +1576,6 @@ static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
 {
         .notifier_call = cpufreq_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */

 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
@@ -2972,7 +2972,6 @@ init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
         }
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static void buffer_exit_cpu(int cpu)
 {
         int i;
@@ -2994,7 +2993,6 @@ static int buffer_cpu_notify(struct notifier_block *self,
                 buffer_exit_cpu((unsigned long)hcpu);
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */

 void __init buffer_init(void)
 {
@@ -89,9 +89,9 @@ int cpu_down(unsigned int cpu);
 #define lock_cpu_hotplug()              do { } while (0)
 #define unlock_cpu_hotplug()            do { } while (0)
 #define lock_cpu_hotplug_interruptible() 0
-#define hotcpu_notifier(fn, pri)        do { } while (0)
-#define register_hotcpu_notifier(nb)    do { } while (0)
-#define unregister_hotcpu_notifier(nb)  do { } while (0)
+#define hotcpu_notifier(fn, pri)        do { (void)(fn); } while (0)
+#define register_hotcpu_notifier(nb)    do { (void)(nb); } while (0)
+#define unregister_hotcpu_notifier(nb)  do { (void)(nb); } while (0)

 /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
 static inline int cpu_is_offline(int cpu) { return 0; }
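The include/linux/cpu.h hunk above is the core of the change. A hedged sketch of what it buys at the call sites, again standalone and with invented names (my_subsys_cpu_callback, struct nb standing in for struct notifier_block, and a simplified CONFIG_HOTPLUG_CPU=y branch): because the stub now evaluates its argument, a callback and its notifier block can be defined unconditionally, which is exactly why the #ifdef/#endif pairs are dropped throughout the rest of this patch.

/* Standalone sketch, not kernel code; all names below are illustrative.
 * With the (void)(nb) stub the notifier block needs no
 * #ifdef CONFIG_HOTPLUG_CPU wrapper even when hotplug is disabled. */
#include <stdio.h>

struct nb {
        int (*notifier_call)(unsigned long action, void *hcpu);
};

#ifdef CONFIG_HOTPLUG_CPU
static void do_register(struct nb *nb) { printf("registered %p\n", (void *)nb); }
# define register_hotcpu_notifier(nb)  do_register(nb)
#else
# define register_hotcpu_notifier(nb)  do { (void)(nb); } while (0)
#endif

static int my_subsys_cpu_callback(unsigned long action, void *hcpu)
{
        (void)action; (void)hcpu;
        return 0;
}

/* Previously this definition had to hide behind #ifdef CONFIG_HOTPLUG_CPU
 * to avoid a "defined but not used" warning when hotplug was off. */
static struct nb my_subsys_cpu_notifier = {
        .notifier_call = my_subsys_cpu_callback,
};

int main(void)
{
        register_hotcpu_notifier(&my_subsys_cpu_notifier);
        return 0;
}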
@@ -2044,7 +2044,6 @@ out:
         return err;
 }

-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG)
 /*
  * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
  * or memory nodes, we need to walk over the cpuset hierarchy,
@@ -2108,9 +2107,7 @@ static void common_cpu_mem_hotplug_unplug(void)
         mutex_unlock(&callback_mutex);
         mutex_unlock(&manage_mutex);
 }
-#endif

-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * The top_cpuset tracks what CPUs and Memory Nodes are online,
  * period.  This is necessary in order to make cpusets transparent
@@ -2127,7 +2124,6 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb,
         common_cpu_mem_hotplug_unplug();
         return 0;
 }
-#endif

 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
@@ -319,7 +319,6 @@ out:
         put_cpu();
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit profile_cpu_callback(struct notifier_block *info,
                                         unsigned long action, void *__cpu)
 {
@@ -372,10 +371,10 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
         }
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 #else /* !CONFIG_SMP */
 #define profile_flip_buffers()          do { } while (0)
 #define profile_discard_flip_buffers()  do { } while (0)
+#define profile_cpu_callback           NULL

 void profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
@@ -6740,8 +6740,6 @@ SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
            sched_smt_power_savings_store);
 #endif

-
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * Force a reinitialization of the sched domains hierarchy.  The domains
  * and groups cannot be updated in place without racing with the balancing
@@ -6774,7 +6772,6 @@ static int update_sched_domains(struct notifier_block *nfb,

         return NOTIFY_OK;
 }
-#endif

 void __init sched_init_smp(void)
 {
@@ -655,7 +655,6 @@ int current_is_keventd(void)

 }

-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
@@ -738,7 +737,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,

         return NOTIFY_OK;
 }
-#endif

 void init_workqueues(void)
 {
@@ -996,7 +996,6 @@ static __init void radix_tree_init_maxindex(void)
                 height_to_maxindex[i] = __maxindex(i);
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int radix_tree_callback(struct notifier_block *nfb,
                                unsigned long action,
                                void *hcpu)
@@ -1016,7 +1015,6 @@ static int radix_tree_callback(struct notifier_block *nfb,
         }
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */

 void __init radix_tree_init(void)
 {
@@ -701,7 +701,6 @@ void drain_node_pages(int nodeid)
 }
 #endif

-#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
         unsigned long flags;
@@ -723,7 +722,6 @@ static void __drain_pages(unsigned int cpu)
                 }
         }
 }
-#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

 #ifdef CONFIG_PM

@@ -2907,7 +2905,6 @@ void __init free_area_init(unsigned long *zones_size)
                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int page_alloc_cpu_notify(struct notifier_block *self,
                                  unsigned long action, void *hcpu)
 {
@@ -2922,7 +2919,6 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
         }
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */

 void __init page_alloc_init(void)
 {
@@ -514,5 +514,7 @@ void __init swap_setup(void)
          * Right now other parts of the system means that we
          * _really_ don't want to cluster much more
          */
+#ifdef CONFIG_HOTPLUG_CPU
         hotcpu_notifier(cpu_swap_callback, 0);
+#endif
 }
@@ -1513,7 +1513,6 @@ out:
 }
 #endif

-#ifdef CONFIG_HOTPLUG_CPU
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
@@ -1534,7 +1533,6 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
         }
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */

 /*
  * This kswapd start function will be called by init and node-hot-add.
@@ -3340,7 +3340,6 @@ void unregister_netdev(struct net_device *dev)

 EXPORT_SYMBOL(unregister_netdev);

-#ifdef CONFIG_HOTPLUG_CPU
 static int dev_cpu_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *ocpu)
@@ -3384,7 +3383,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,

         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */

 #ifdef CONFIG_NET_DMA
 /**
@@ -340,7 +340,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
         tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }

-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
                           unsigned long action,
                           void *hcpu)
@@ -349,7 +348,6 @@ static int flow_cache_cpu(struct notifier_block *nfb,
                 __flow_cache_shrink((unsigned long)hcpu, 0);
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */

 static int __init flow_cache_init(void)
 {