sched: Fix compiler warnings
Commit 143e1e28cb
(sched: Rework sched_domain topology definition) introduced a number of functions with a return value of 'const int'. gcc doesn't know what to do with that and, if the kernel is compiled with W=1, complains with the following warnings whenever sched.h is included. include/linux/sched.h:875:25: warning: type qualifiers ignored on function return type include/linux/sched.h:882:25: warning: type qualifiers ignored on function return type include/linux/sched.h:889:25: warning: type qualifiers ignored on function return type include/linux/sched.h:1002:21: warning: type qualifiers ignored on function return type Commits fb2aa855
(sched, ARM: Create a dedicated scheduler topology table) and 607b45e9a
(sched, powerpc: Create a dedicated topology table) introduce the same warning in the arm and powerpc code. Drop 'const' from the function declarations to fix the problem. The fix for all three patches has to be applied together to avoid compilation failures for the affected architectures. Acked-by: Vincent Guittot <vincent.guittot@linaro.org> Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Guenter Roeck <linux@roeck-us.net> Cc: Russell King <linux@arm.linux.org.uk> Cc: Paul Mackerras <paulus@samba.org> Cc: Dietmar Eggemann <dietmar.eggemann@arm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1403658329-13196-1-git-send-email-linux@roeck-us.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
3896c329df
commit
b6220ad66b
|
@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
|
||||||
cpu_topology[cpuid].socket_id, mpidr);
|
cpu_topology[cpuid].socket_id, mpidr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline const int cpu_corepower_flags(void)
|
static inline int cpu_corepower_flags(void)
|
||||||
{
|
{
|
||||||
return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
|
return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
|
||||||
}
|
}
|
||||||
|
|
|
@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
|
||||||
|
|
||||||
#ifdef CONFIG_SCHED_SMT
|
#ifdef CONFIG_SCHED_SMT
|
||||||
/* cpumask of CPUs with asymetric SMT dependancy */
|
/* cpumask of CPUs with asymetric SMT dependancy */
|
||||||
static const int powerpc_smt_flags(void)
|
static int powerpc_smt_flags(void)
|
||||||
{
|
{
|
||||||
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
|
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
|
||||||
|
|
||||||
|
|
|
@ -872,21 +872,21 @@ enum cpu_idle_type {
|
||||||
#define SD_NUMA 0x4000 /* cross-node balancing */
|
#define SD_NUMA 0x4000 /* cross-node balancing */
|
||||||
|
|
||||||
#ifdef CONFIG_SCHED_SMT
|
#ifdef CONFIG_SCHED_SMT
|
||||||
static inline const int cpu_smt_flags(void)
|
static inline int cpu_smt_flags(void)
|
||||||
{
|
{
|
||||||
return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
|
return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_SCHED_MC
|
#ifdef CONFIG_SCHED_MC
|
||||||
static inline const int cpu_core_flags(void)
|
static inline int cpu_core_flags(void)
|
||||||
{
|
{
|
||||||
return SD_SHARE_PKG_RESOURCES;
|
return SD_SHARE_PKG_RESOURCES;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_NUMA
|
#ifdef CONFIG_NUMA
|
||||||
static inline const int cpu_numa_flags(void)
|
static inline int cpu_numa_flags(void)
|
||||||
{
|
{
|
||||||
return SD_NUMA;
|
return SD_NUMA;
|
||||||
}
|
}
|
||||||
|
@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
|
||||||
bool cpus_share_cache(int this_cpu, int that_cpu);
|
bool cpus_share_cache(int this_cpu, int that_cpu);
|
||||||
|
|
||||||
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
|
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
|
||||||
typedef const int (*sched_domain_flags_f)(void);
|
typedef int (*sched_domain_flags_f)(void);
|
||||||
|
|
||||||
#define SDTL_OVERLAP 0x01
|
#define SDTL_OVERLAP 0x01
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue