cgroup/cpuset: Allow suppression of sched domain rebuild in update_cpumasks_hier()
A single partition setup and tear-down operation can lead to multiple rebuild_sched_domains_locked() calls, which is a waste of effort. This can be partly mitigated by adding a flag to suppress the rebuild_sched_domains_locked() call in update_cpumasks_hier(). Since a Boolean flag is already passed as the 3rd argument to update_cpumasks_hier(), we can extend that into a full flag word.

The sched domain rebuild suppression is now enabled in update_sibling_cpumasks(), as all its callers will do the sched domain rebuild after its return later on anyway.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 3ae0b77321
parent 99fe36ba6f
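For readers skimming the diff below, here is a minimal stand-alone C sketch of the bool-to-flag-word conversion the patch performs. The HIER_CHECKALL and HIER_NO_SD_REBUILD names and the flag tests mirror the patch, but the function bodies are hypothetical stubs that only print what they would do; this is an illustration of the pattern, not the kernel code.

#include <stdio.h>

/* Flag values mirror the ones introduced by the patch. */
#define HIER_CHECKALL		0x01	/* check all cpusets with no skipping */
#define HIER_NO_SD_REBUILD	0x02	/* don't rebuild sched domains */

static int need_rebuild_sched_domains = 1;	/* pretend the hierarchy changed */

/* Stub standing in for the real (expensive) rebuild. */
static void rebuild_sched_domains_locked(void)
{
	printf("rebuilding sched domains\n");
}

/* Stub with the new-style signature: an int flag word instead of a bool. */
static void update_cpumasks_hier(int flags)
{
	printf("walking subtree%s\n",
	       (flags & HIER_CHECKALL) ? " (checking every cpuset)" : "");

	/* The rebuild only happens when the caller did not suppress it. */
	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
		rebuild_sched_domains_locked();
}

int main(void)
{
	update_cpumasks_hier(0);			/* ordinary update: may rebuild */
	update_cpumasks_hier(HIER_NO_SD_REBUILD);	/* sibling update: caller rebuilds later */
	update_cpumasks_hier(HIER_CHECKALL);		/* forced walk, rebuild allowed */
	return 0;
}

Packing the independent "check all" and "no rebuild" choices into one flag word keeps the function signature stable if further behaviours need to be added later.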
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1590,6 +1590,12 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
 	return 0;
 }
 
+/*
+ * update_cpumasks_hier() flags
+ */
+#define HIER_CHECKALL		0x01	/* Check all cpusets with no skipping */
+#define HIER_NO_SD_REBUILD	0x02	/* Don't rebuild sched domains */
+
 /*
  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
  * @cs: the cpuset to consider
@@ -1604,7 +1610,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
  * Called with cpuset_mutex held
  */
 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
-				 bool force)
+				 int flags)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
@@ -1644,10 +1650,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		 * Skip the whole subtree if
 		 * 1) the cpumask remains the same,
 		 * 2) has no partition root state,
-		 * 3) force flag not set, and
+		 * 3) HIER_CHECKALL flag not set, and
 		 * 4) for v2 load balance state same as its parent.
 		 */
-		if (!cp->partition_root_state && !force &&
+		if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
 		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
@@ -1764,7 +1770,7 @@ update_parent_subparts:
 	}
 	rcu_read_unlock();
 
-	if (need_rebuild_sched_domains)
+	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
 		rebuild_sched_domains_locked();
 }
 
@@ -1788,7 +1794,9 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
 	 * to use the right effective_cpus value.
 	 *
 	 * The update_cpumasks_hier() function may sleep. So we have to
-	 * release the RCU read lock before calling it.
+	 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
+	 * flag is used to suppress rebuild of sched domains as the callers
+	 * will take care of that.
 	 */
 	rcu_read_lock();
 	cpuset_for_each_child(sibling, pos_css, parent) {
@@ -1800,7 +1808,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
 			continue;
 
 		rcu_read_unlock();
-		update_cpumasks_hier(sibling, tmp, false);
+		update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
 		rcu_read_lock();
 		css_put(&sibling->css);
 	}
@@ -1913,7 +1921,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	spin_unlock_irq(&callback_lock);
 
 	/* effective_cpus will be updated here */
-	update_cpumasks_hier(cs, &tmp, false);
+	update_cpumasks_hier(cs, &tmp, 0);
 
 	if (cs->partition_root_state) {
 		struct cpuset *parent = parent_cs(cs);
@@ -2382,7 +2390,7 @@ out:
 	 * Force update if switching back to member.
 	 */
 	if (!list_empty(&cs->css.children))
-		update_cpumasks_hier(cs, &tmpmask, !new_prs);
+		update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
 
 	/* Update sched domains and load balance flag */
 	update_partition_sd_lb(cs, old_prs);