staging: lustre: lnet: switch to cpumask_var_t

So that we can use the common cpumask allocation functions,
switch to cpumask_var_t.
We need to be careful not to free a cpumask_var_t before the variable
has been initialized, and it cannot be initialized by direct
assignment, so we must be sure either that it is filled with zeros or
that zalloc_cpumask_var() has been called on it.
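
For background (not part of the patch text): cpumask_var_t is defined in
include/linux/cpumask.h as 'struct cpumask *' when CONFIG_CPUMASK_OFFSTACK=y
and as the one-element array 'struct cpumask[1]' otherwise, which is why it
cannot portably be assigned NULL and why only a zero-filled or
zalloc_cpumask_var()-initialized variable may safely be handed to
free_cpumask_var(). A minimal usage sketch, illustrative only, using GFP_NOFS
as this patch does:

	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_NOFS))
		return -ENOMEM;			/* allocation can only fail with OFFSTACK */
	cpumask_copy(mask, cpu_online_mask);	/* decays to struct cpumask * */
	/* ... use mask with the usual cpumask_* helpers ... */
	free_cpumask_var(mask);			/* kfree() or no-op, depending on config */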

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
NeilBrown, 2018-01-09 12:19:38 +11:00 (committed by Greg Kroah-Hartman)
parent 3c88bdbbf9
commit 3872fb73ca
5 files changed, 49 insertions(+), 59 deletions(-)


@@ -79,7 +79,7 @@
 /**
  * return cpumask of CPU partition \a cpt
  */
-cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
+cpumask_var_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
 /**
  * print string information of cpt-table
  */
@@ -96,7 +96,7 @@ struct cfs_cpt_table {
 	u64 ctb_version;
 };
 
-static inline cpumask_t *
+static inline cpumask_var_t *
 cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
 {
 	return NULL;


@@ -49,7 +49,7 @@
 /** virtual processing unit */
 struct cfs_cpu_partition {
 	/* CPUs mask for this partition */
-	cpumask_t *cpt_cpumask;
+	cpumask_var_t cpt_cpumask;
 	/* nodes mask for this partition */
 	nodemask_t *cpt_nodemask;
 	/* spread rotor for NUMA allocator */
/* spread rotor for NUMA allocator */
@@ -69,7 +69,7 @@ struct cfs_cpt_table {
 	/* shadow HW CPU to CPU partition ID */
 	int *ctb_cpu2cpt;
 	/* all cpus in this partition table */
-	cpumask_t *ctb_cpumask;
+	cpumask_var_t ctb_cpumask;
 	/* all nodes in this partition table */
 	nodemask_t *ctb_nodemask;
 };


@@ -596,7 +596,7 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
 
 static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
 {
-	cpumask_t *mask;
+	cpumask_var_t *mask;
 	int vectors;
 	int off;
 	int i;
@@ -611,8 +611,8 @@ static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
 		return 0;
 
 	/* hash NID to CPU id in this partition... */
-	off = do_div(nid, cpumask_weight(mask));
-	for_each_cpu(i, mask) {
+	off = do_div(nid, cpumask_weight(*mask));
+	for_each_cpu(i, *mask) {
 		if (!off--)
 			return i % vectors;
 	}
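
Note (not part of the patch text): because cfs_cpt_cpumask() now returns a
cpumask_var_t * rather than a cpumask_t *, callers such as
kiblnd_get_completion_vector() above have to dereference the result before
passing it to the cpumask_* helpers. A hypothetical caller sketch (the
function name is made up for illustration):

	static int example_first_cpu_of_cpt(struct cfs_cpt_table *cptab, int cpt)
	{
		cpumask_var_t *mask = cfs_cpt_cpumask(cptab, cpt);

		if (!mask || !cpumask_weight(*mask))
			return -1;
		return cpumask_first(*mask);	/* *mask acts as a struct cpumask * */
	}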


@@ -113,7 +113,7 @@ cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
 {
 	return &cptab->ctb_nodemask;
 }
-EXPORT_SYMBOL(cfs_cpt_cpumask);
+EXPORT_SYMBOL(cfs_cpt_nodemask);
 
 int
 cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)


@@ -72,7 +72,7 @@ struct cfs_cpt_data {
 	/* mutex to protect cpt_cpumask */
 	struct mutex cpt_mutex;
 	/* scratch buffer for set/unset_node */
-	cpumask_t *cpt_cpumask;
+	cpumask_var_t cpt_cpumask;
 };
 
 static struct cfs_cpt_data cpt_data;
@@ -103,8 +103,7 @@ cfs_cpt_table_free(struct cfs_cpt_table *cptab)
 		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
 
 		kfree(part->cpt_nodemask);
-		if (part->cpt_cpumask)
-			LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
+		free_cpumask_var(part->cpt_cpumask);
 	}
 
 	if (cptab->ctb_parts) {
@@ -113,8 +112,7 @@ cfs_cpt_table_free(struct cfs_cpt_table *cptab)
 	}
 
 	kfree(cptab->ctb_nodemask);
-	if (cptab->ctb_cpumask)
-		LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
+	free_cpumask_var(cptab->ctb_cpumask);
 
 	kfree(cptab);
 }
@@ -132,11 +130,10 @@ cfs_cpt_table_alloc(unsigned int ncpt)
 
 	cptab->ctb_nparts = ncpt;
 
-	LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
 	cptab->ctb_nodemask = kzalloc(sizeof(*cptab->ctb_nodemask),
 				      GFP_NOFS);
-
-	if (!cptab->ctb_cpumask || !cptab->ctb_nodemask)
+	if (!zalloc_cpumask_var(&cptab->ctb_cpumask, GFP_NOFS) ||
+	    !cptab->ctb_nodemask)
 		goto failed;
 
 	LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
@@ -154,10 +151,10 @@ cfs_cpt_table_alloc(unsigned int ncpt)
 	for (i = 0; i < ncpt; i++) {
 		struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
 
-		LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
 		part->cpt_nodemask = kzalloc(sizeof(*part->cpt_nodemask),
 					     GFP_NOFS);
-		if (!part->cpt_cpumask || !part->cpt_nodemask)
+		if (!zalloc_cpumask_var(&part->cpt_cpumask, GFP_NOFS) ||
+		    !part->cpt_nodemask)
 			goto failed;
 	}
 
@@ -248,13 +245,13 @@ cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
 }
 EXPORT_SYMBOL(cfs_cpt_online);
 
-cpumask_t *
+cpumask_var_t *
 cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
 {
 	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
 
 	return cpt == CFS_CPT_ANY ?
-	       cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
+	       &cptab->ctb_cpumask : &cptab->ctb_parts[cpt].cpt_cpumask;
 }
 EXPORT_SYMBOL(cfs_cpt_cpumask);
 
@@ -402,7 +399,6 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
 int
 cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
 {
-	cpumask_t *mask;
 	int rc;
 
 	if (node < 0 || node >= MAX_NUMNODES) {
@@ -413,10 +409,9 @@ cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
 
 	mutex_lock(&cpt_data.cpt_mutex);
 
-	mask = cpt_data.cpt_cpumask;
-	cfs_node_to_cpumask(node, mask);
+	cfs_node_to_cpumask(node, cpt_data.cpt_cpumask);
 
-	rc = cfs_cpt_set_cpumask(cptab, cpt, mask);
+	rc = cfs_cpt_set_cpumask(cptab, cpt, cpt_data.cpt_cpumask);
 
 	mutex_unlock(&cpt_data.cpt_mutex);
 
@@ -427,8 +422,6 @@ EXPORT_SYMBOL(cfs_cpt_set_node);
 void
 cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
 {
-	cpumask_t *mask;
-
 	if (node < 0 || node >= MAX_NUMNODES) {
 		CDEBUG(D_INFO,
 		       "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
@@ -437,10 +430,9 @@ cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
 
 	mutex_lock(&cpt_data.cpt_mutex);
 
-	mask = cpt_data.cpt_cpumask;
-	cfs_node_to_cpumask(node, mask);
+	cfs_node_to_cpumask(node, cpt_data.cpt_cpumask);
 
-	cfs_cpt_unset_cpumask(cptab, cpt, mask);
+	cfs_cpt_unset_cpumask(cptab, cpt, cpt_data.cpt_cpumask);
 
 	mutex_unlock(&cpt_data.cpt_mutex);
 }
@@ -556,7 +548,7 @@ EXPORT_SYMBOL(cfs_cpt_of_cpu);
 int
 cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
 {
-	cpumask_t *cpumask;
+	cpumask_var_t *cpumask;
 	nodemask_t *nodemask;
 	int rc;
 	int i;
@@ -564,24 +556,24 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
 	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
 
 	if (cpt == CFS_CPT_ANY) {
-		cpumask = cptab->ctb_cpumask;
+		cpumask = &cptab->ctb_cpumask;
 		nodemask = cptab->ctb_nodemask;
 	} else {
-		cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
+		cpumask = &cptab->ctb_parts[cpt].cpt_cpumask;
 		nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
 	}
 
-	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
+	if (cpumask_any_and(*cpumask, cpu_online_mask) >= nr_cpu_ids) {
 		CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
 		       cpt);
 		return -EINVAL;
 	}
 
 	for_each_online_cpu(i) {
-		if (cpumask_test_cpu(i, cpumask))
+		if (cpumask_test_cpu(i, *cpumask))
 			continue;
 
-		rc = set_cpus_allowed_ptr(current, cpumask);
+		rc = set_cpus_allowed_ptr(current, *cpumask);
 		set_mems_allowed(*nodemask);
 		if (!rc)
 			schedule();	/* switch to allowed CPU */
@@ -602,8 +594,8 @@ static int
 cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 		     cpumask_t *node, int number)
 {
-	cpumask_t *socket = NULL;
-	cpumask_t *core = NULL;
+	cpumask_var_t socket;
+	cpumask_var_t core;
 	int rc = 0;
 	int cpu;
 
@@ -621,13 +613,17 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 		return 0;
 	}
 
-	/* allocate scratch buffer */
-	LIBCFS_ALLOC(socket, cpumask_size());
-	LIBCFS_ALLOC(core, cpumask_size());
-	if (!socket || !core) {
+	/*
+	 * Allocate scratch buffers
+	 * As we cannot initialize a cpumask_var_t, we need
+	 * to alloc both before we can risk trying to free either
+	 */
+	if (!zalloc_cpumask_var(&socket, GFP_NOFS))
 		rc = -ENOMEM;
+	if (!zalloc_cpumask_var(&core, GFP_NOFS))
+		rc = -ENOMEM;
+	if (rc)
 		goto out;
-	}
 
 	while (!cpumask_empty(node)) {
 		cpu = cpumask_first(node);
@@ -665,10 +661,8 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 	}
 
 out:
-	if (socket)
-		LIBCFS_FREE(socket, cpumask_size());
-	if (core)
-		LIBCFS_FREE(core, cpumask_size());
+	free_cpumask_var(socket);
+	free_cpumask_var(core);
 	return rc;
 }
 
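
The comment introduced above is the heart of the change: under
CONFIG_CPUMASK_OFFSTACK a cpumask_var_t is a bare pointer, so calling
free_cpumask_var() on a variable that was never passed to zalloc_cpumask_var()
would kfree() whatever garbage happens to be on the stack. A hedged sketch of
the difference (reusing the socket/core/rc/out names from the hunks above):

	/* Unsafe: bailing out after the first failure would reach the
	 * out: label with 'core' still uninitialized. */
	if (!zalloc_cpumask_var(&socket, GFP_NOFS))
		goto out;
	if (!zalloc_cpumask_var(&core, GFP_NOFS))
		goto out;

	/* Safe (the pattern used by this patch): attempt both allocations
	 * first, so each mask is either valid or NULL/zeroed before any
	 * free_cpumask_var() call can run. */
	if (!zalloc_cpumask_var(&socket, GFP_NOFS))
		rc = -ENOMEM;
	if (!zalloc_cpumask_var(&core, GFP_NOFS))
		rc = -ENOMEM;
	if (rc)
		goto out;
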
@@ -721,7 +715,7 @@ static struct cfs_cpt_table *
 cfs_cpt_table_create(int ncpt)
 {
 	struct cfs_cpt_table *cptab = NULL;
-	cpumask_t *mask = NULL;
+	cpumask_var_t mask;
 	int cpt = 0;
 	int num;
 	int rc;
@@ -754,8 +748,7 @@ cfs_cpt_table_create(int ncpt)
 		goto failed;
 	}
 
-	LIBCFS_ALLOC(mask, cpumask_size());
-	if (!mask) {
+	if (!zalloc_cpumask_var(&mask, GFP_NOFS)) {
 		CERROR("Failed to allocate scratch cpumask\n");
 		goto failed;
 	}
@@ -782,7 +775,7 @@ cfs_cpt_table_create(int ncpt)
 
 		rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
 		if (rc < 0)
-			goto failed;
+			goto failed_mask;
 
 		LASSERT(num >= cpumask_weight(part->cpt_cpumask));
 		if (num == cpumask_weight(part->cpt_cpumask))
@@ -795,20 +788,19 @@ cfs_cpt_table_create(int ncpt)
 		CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
 		       cptab->ctb_nparts, num, cpt,
 		       cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
-		goto failed;
+		goto failed_mask;
 	}
 
-	LIBCFS_FREE(mask, cpumask_size());
+	free_cpumask_var(mask);
 
 	return cptab;
 
+failed_mask:
+	free_cpumask_var(mask);
 failed:
 	CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
 	       ncpt, num_online_nodes(), num_online_cpus());
 
-	if (mask)
-		LIBCFS_FREE(mask, cpumask_size());
-
 	if (cptab)
 		cfs_cpt_table_free(cptab);
 
@@ -1011,8 +1003,7 @@ cfs_cpu_fini(void)
 	cpuhp_remove_state_nocalls(lustre_cpu_online);
 	cpuhp_remove_state_nocalls(CPUHP_LUSTRE_CFS_DEAD);
 #endif
-	if (cpt_data.cpt_cpumask)
-		LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
+	free_cpumask_var(cpt_data.cpt_cpumask);
 }
 
 int
@@ -1024,8 +1015,7 @@ cfs_cpu_init(void)
 
 	memset(&cpt_data, 0, sizeof(cpt_data));
 
-	LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
-	if (!cpt_data.cpt_cpumask) {
+	if (!zalloc_cpumask_var(&cpt_data.cpt_cpumask, GFP_NOFS)) {
 		CERROR("Failed to allocate scratch buffer\n");
 		return -1;
 	}