sched/numa: Remove the NUMA sched_feature
The sched_numa_balancing variable is available under both CONFIG_SCHED_DEBUG and !CONFIG_SCHED_DEBUG, and all code paths now check sched_numa_balancing. Hence remove sched_feat(NUMA).

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1439290813-6683-4-git-send-email-srikar@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 2b49d84b25
parent c3b9bc5bbf
@@ -2120,12 +2120,6 @@ __read_mostly bool sched_numa_balancing;
 void set_numabalancing_state(bool enabled)
 {
 	sched_numa_balancing = enabled;
-#ifdef CONFIG_SCHED_DEBUG
-	if (enabled)
-		sched_feat_set("NUMA");
-	else
-		sched_feat_set("NO_NUMA");
-#endif /* CONFIG_SCHED_DEBUG */
 }
 
 #ifdef CONFIG_PROC_SYSCTL
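As a rough user-space model of the pattern the hunk above leaves behind (a sketch only: the surrounding scaffolding and the caller name task_tick_numa_model are invented for illustration, not kernel code): set_numabalancing_state() is reduced to the assignment, and NUMA-balancing paths test the variable directly instead of the debug-only sched_feat(NUMA) bit.

	/* Sketch only: user-space model of the pattern, not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool sched_numa_balancing;	/* mirrors the kernel variable */

	static void set_numabalancing_state(bool enabled)
	{
		/* After this commit the function is just the assignment;
		 * the sched_feat_set("NUMA"/"NO_NUMA") calls are gone. */
		sched_numa_balancing = enabled;
	}

	static void task_tick_numa_model(void)	/* hypothetical caller */
	{
		/* Callers gate on the variable directly, in both debug and
		 * non-debug builds, rather than on sched_feat(NUMA). */
		if (!sched_numa_balancing)
			return;
		printf("would queue NUMA hinting-fault work here\n");
	}

	int main(void)
	{
		set_numabalancing_state(true);
		task_tick_numa_model();
		return 0;
	}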
@@ -72,21 +72,5 @@ SCHED_FEAT(RT_PUSH_IPI, true)
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
 
 SCHED_FEAT(ATTACH_AGE_LOAD, true)
-
-/*
- * Apply the automatic NUMA scheduling policy. Enabled automatically
- * at runtime if running on a NUMA machine. Can be controlled via
- * numa_balancing=
- */
-#ifdef CONFIG_NUMA_BALANCING
-
-/*
- * NUMA will favor moving tasks towards nodes where a higher number of
- * hinting faults are recorded during active load balancing. It will
- * resist moving tasks towards nodes where a lower number of hinting
- * faults have been recorded.
- */
-SCHED_FEAT(NUMA, true)
-#endif
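For context, the SCHED_FEAT() entries in features.h are consumed through an X-macro pattern: the header is included more than once with different definitions of SCHED_FEAT() to build a per-feature bit index and the default feature mask that sched_feat() tests (and, under CONFIG_SCHED_DEBUG, the names exposed via the sched_features debugfs toggle). The following is a small self-contained model of that technique; the macro names and exact expansions are simplified and are not the kernel's. It shows why deleting the SCHED_FEAT(NUMA, true) line removes the feature bit along with its toggle name.

	/* Sketch only: self-contained model of the features.h X-macro,
	 * not the kernel's exact macros. */
	#include <stdio.h>

	#define SCHED_FEATURES(F)		\
		F(FORCE_SD_OVERLAP, 0)		\
		F(RT_RUNTIME_SHARE, 1)		\
		F(LB_MIN, 0)
		/* SCHED_FEAT(NUMA, true) used to be one more entry here. */

	/* Expansion 1: a bit index per feature. */
	#define FEAT_ENUM(name, enabled)	__SCHED_FEAT_##name,
	enum { SCHED_FEATURES(FEAT_ENUM) __SCHED_FEAT_NR };

	/* Expansion 2: the default feature bitmask. */
	#define FEAT_MASK(name, enabled)	((unsigned long)(enabled) << __SCHED_FEAT_##name) |
	static const unsigned long sysctl_sched_features = SCHED_FEATURES(FEAT_MASK) 0UL;

	/* The non-debug flavour of sched_feat() just tests the bit. */
	#define sched_feat(x)	(!!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)))

	int main(void)
	{
		printf("RT_RUNTIME_SHARE=%d LB_MIN=%d\n",
		       sched_feat(RT_RUNTIME_SHARE), sched_feat(LB_MIN));
		return 0;
	}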