cpuset: modify cpuset_set_cpus_allowed to use cpumask pointer
* Modify cpuset_cpus_allowed to return the currently allowed cpuset
  via a pointer argument instead of as the function return value.

* Use new set_cpus_allowed_ptr function.

* Cleanup CPU_MASK_ALL and NODE_MASK_ALL uses.

Depends on:
	[sched-devel]: sched: add new set_cpus_allowed_ptr function

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit f9a86fcbbb
parent f70316dace
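The first two bullets share one motivation: cpumask_t is a fixed-size bitmap whose size scales with NR_CPUS, so returning it by value copies the whole bitmap through the stack on every call, while passing a pointer lets the callee fill storage the caller already owns (with NR_CPUS=4096, the mask alone is 512 bytes per copy). Below is a minimal userspace sketch of the two calling conventions; the mask type and size are illustrative stand-ins, not the kernel's types:

	#include <stdio.h>
	#include <string.h>

	/* Illustrative stand-in for cpumask_t: a 4096-bit map is 512 bytes. */
	struct mask { unsigned long bits[4096 / (8 * sizeof(unsigned long))]; };

	/* Old convention: the whole bitmap is copied back through the return value. */
	static struct mask mask_byval(void)
	{
		struct mask m;

		memset(&m, 0xff, sizeof(m));
		return m;			/* copies sizeof(struct mask) bytes */
	}

	/* New convention: the caller supplies the storage; nothing is copied back. */
	static void mask_byptr(struct mask *pmask)
	{
		memset(pmask, 0xff, sizeof(*pmask));
	}

	int main(void)
	{
		struct mask a = mask_byval();	/* temporary + copy */
		struct mask b;

		mask_byptr(&b);			/* filled in place */
		printf("masks equal: %d\n", !memcmp(&a, &b, sizeof(a)));
		return 0;
	}

set_cpus_allowed_ptr() applies the same idea on the consumer side, taking a const cpumask_t pointer where set_cpus_allowed() took the mask by value.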
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,8 +20,8 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 extern int cpuset_init_early(void);
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
-extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
-extern cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p);
+extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@ -84,13 +84,14 @@ static inline int cpuset_init_early(void) { return 0; }
|
||||||
static inline int cpuset_init(void) { return 0; }
|
static inline int cpuset_init(void) { return 0; }
|
||||||
static inline void cpuset_init_smp(void) {}
|
static inline void cpuset_init_smp(void) {}
|
||||||
|
|
||||||
static inline cpumask_t cpuset_cpus_allowed(struct task_struct *p)
|
static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
|
||||||
{
|
{
|
||||||
return cpu_possible_map;
|
*mask = cpu_possible_map;
|
||||||
}
|
}
|
||||||
static inline cpumask_t cpuset_cpus_allowed_locked(struct task_struct *p)
|
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
|
||||||
|
cpumask_t *mask)
|
||||||
{
|
{
|
||||||
return cpu_possible_map;
|
*mask = cpu_possible_map;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
|
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
|
||||||
|
|
|
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -729,7 +729,7 @@ int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
  */
 void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
 {
-	set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed);
+	set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
 }
 
 /**
@@ -1178,7 +1178,7 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 
 	mutex_lock(&callback_mutex);
 	guarantee_online_cpus(cs, &cpus);
-	set_cpus_allowed(tsk, cpus);
+	set_cpus_allowed_ptr(tsk, &cpus);
 	mutex_unlock(&callback_mutex);
 
 	from = oldcs->mems_allowed;
@@ -1555,8 +1555,8 @@ static struct cgroup_subsys_state *cpuset_create(
 	if (is_spread_slab(parent))
 		set_bit(CS_SPREAD_SLAB, &cs->flags);
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
-	cs->cpus_allowed = CPU_MASK_NONE;
-	cs->mems_allowed = NODE_MASK_NONE;
+	cpus_clear(cs->cpus_allowed);
+	nodes_clear(cs->mems_allowed);
 	cs->mems_generation = cpuset_mems_generation++;
 	fmeter_init(&cs->fmeter);
 
@@ -1625,8 +1625,8 @@ int __init cpuset_init(void)
 {
 	int err = 0;
 
-	top_cpuset.cpus_allowed = CPU_MASK_ALL;
-	top_cpuset.mems_allowed = NODE_MASK_ALL;
+	cpus_setall(top_cpuset.cpus_allowed);
+	nodes_setall(top_cpuset.mems_allowed);
 
 	fmeter_init(&top_cpuset.fmeter);
 	top_cpuset.mems_generation = cpuset_mems_generation++;
@@ -1844,6 +1844,7 @@ void __init cpuset_init_smp(void)
 
  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
+ * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
  *
  * Description: Returns the cpumask_t cpus_allowed of the cpuset
  * attached to the specified @tsk. Guaranteed to return some non-empty
@@ -1851,35 +1852,27 @@ void __init cpuset_init_smp(void)
  * tasks cpuset.
 **/
 
-cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
+void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
 {
-	cpumask_t mask;
-
 	mutex_lock(&callback_mutex);
-	mask = cpuset_cpus_allowed_locked(tsk);
+	cpuset_cpus_allowed_locked(tsk, pmask);
 	mutex_unlock(&callback_mutex);
-
-	return mask;
 }
 
 /**
 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
 * Must be called with callback_mutex held.
 **/
-cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
 {
-	cpumask_t mask;
-
 	task_lock(tsk);
-	guarantee_online_cpus(task_cs(tsk), &mask);
+	guarantee_online_cpus(task_cs(tsk), pmask);
 	task_unlock(tsk);
-
-	return mask;
 }
 
 void cpuset_init_current_mems_allowed(void)
 {
-	current->mems_allowed = NODE_MASK_ALL;
+	nodes_setall(current->mems_allowed);
 }
 
 /**
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4941,13 +4941,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
 	if (retval)
 		goto out_unlock;
 
-	cpus_allowed = cpuset_cpus_allowed(p);
+	cpuset_cpus_allowed(p, &cpus_allowed);
 	cpus_and(new_mask, new_mask, cpus_allowed);
 again:
 	retval = set_cpus_allowed(p, new_mask);
 
 	if (!retval) {
-		cpus_allowed = cpuset_cpus_allowed(p);
+		cpuset_cpus_allowed(p, &cpus_allowed);
 		if (!cpus_subset(new_mask, cpus_allowed)) {
 			/*
 			 * We must have raced with a concurrent cpuset
@@ -5661,7 +5661,9 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu >= nr_cpu_ids) {
-		cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p);
+		cpumask_t cpus_allowed;
+
+		cpuset_cpus_allowed_locked(p, &cpus_allowed);
 		/*
 		 * Try to stay on the same cpuset, where the
 		 * current cpuset may be a subset of all cpus.
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -187,8 +187,8 @@ static int pdflush(void *dummy)
	 * This is needed as pdflush's are dynamically created and destroyed.
	 * The boottime pdflush's are easily placed w/o these 2 lines.
	 */
-	cpus_allowed = cpuset_cpus_allowed(current);
-	set_cpus_allowed(current, cpus_allowed);
+	cpuset_cpus_allowed(current, &cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	return __pdflush(&my_work);
 }
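Taken together, the hunks converge on a single caller idiom: declare the cpumask_t in the caller, pass its address to cpuset_cpus_allowed() to be filled, then hand the same pointer on to set_cpus_allowed_ptr(). The third bullet follows the same logic: assigning CPU_MASK_ALL/NODE_MASK_ALL copies a full-width constant mask, while cpus_setall()/nodes_setall() set the bits in place. A compilable sketch of the resulting call shape, using mocked-up stand-ins rather than the real kernel symbols:

	#include <stdio.h>

	struct mask { unsigned long bits[8]; };	/* illustrative stand-in */

	/* Mock of the reworked cpuset_cpus_allowed(): fills the caller's mask. */
	static void cpuset_cpus_allowed_mock(struct mask *pmask)
	{
		for (unsigned int i = 0; i < 8; i++)
			pmask->bits[i] = ~0UL;	/* "setall" done in place, no copy */
	}

	/* Mock of set_cpus_allowed_ptr(): consumes the mask by const pointer. */
	static int set_cpus_allowed_ptr_mock(const struct mask *new_mask)
	{
		printf("applied mask, first word = %lx\n", new_mask->bits[0]);
		return 0;
	}

	int main(void)
	{
		struct mask cpus_allowed;

		/* The post-patch pdflush()-style sequence: */
		cpuset_cpus_allowed_mock(&cpus_allowed);
		return set_cpus_allowed_ptr_mock(&cpus_allowed);
	}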