staging/lustre/ptlrpc: Do not use deprecated cpus_* functions
As per Rusty Russell, the cpus_* functions are deprecated. When cpumask_copy is mixed with cpus_weight, the two operate on different-sized masks if CPUMASK_OFFSTACK is enabled, causing an immediate assertion failure. Copying cpumasks by plain assignment is also no longer allowed. Additionally, in ptlrpc/service.c, avoid the cpumask copies entirely, since the mask is only used to check how many siblings core #0 has and nothing else.

Reported-by: Tyson Whitehead <twhitehead@gmail.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
commit dc0d838a05
parent 35168f512f
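For context, the failure mode the message describes comes from mixing the old by-value cpus_* helpers with the pointer-based cpumask API. Below is a minimal sketch of the safe replacement pattern; it is not taken from the Lustre tree and the helper name count_node_cpus is hypothetical. With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is heap-allocated and only nr_cpumask_bits wide, while the deprecated cpus_* macros assume a full NR_CPUS-bit struct cpumask passed by value, so the two must not be mixed.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* Sketch only: count the CPUs of a NUMA node using the pointer-based
 * cpumask API throughout.  Works whether the mask lives on the stack
 * or, with CONFIG_CPUMASK_OFFSTACK=y, on the heap. */
static int count_node_cpus(int node)
{
	cpumask_var_t mask;	/* a pointer when CONFIG_CPUMASK_OFFSTACK=y */
	int weight;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Deprecated style removed by this patch:
	 *	mask = *cpumask_of_node(node);	// struct assignment
	 *	weight = cpus_weight(mask);	// by-value, NR_CPUS bits
	 */
	cpumask_copy(mask, cpumask_of_node(node));
	weight = cpumask_weight(mask);

	free_cpumask_var(mask);
	return weight;
}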
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -511,10 +511,10 @@ static int ptlrpcd_bind(int index, int max)
 #if defined(CONFIG_NUMA)
 	{
 		int i;
-		mask = *cpumask_of_node(cpu_to_node(index));
+		cpumask_copy(&mask, cpumask_of_node(cpu_to_node(index)));
 		for (i = max; i < num_online_cpus(); i++)
-			cpu_clear(i, mask);
-		pc->pc_npartners = cpus_weight(mask) - 1;
+			cpumask_clear_cpu(i, &mask);
+		pc->pc_npartners = cpumask_weight(&mask) - 1;
 		set_bit(LIOD_BIND, &pc->pc_flags);
 	}
 #else
@@ -554,7 +554,7 @@ static int ptlrpcd_bind(int index, int max)
 			 * that are already initialized
 			 */
 			for (pidx = 0, i = 0; i < index; i++) {
-				if (cpu_isset(i, mask)) {
+				if (cpumask_test_cpu(i, &mask)) {
 					ppc = &ptlrpcds->pd_threads[i];
 					pc->pc_partners[pidx++] = ppc;
 					ppc->pc_partners[ppc->
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -543,7 +543,6 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
 	if (tc->tc_thr_factor != 0) {
 		int factor = tc->tc_thr_factor;
 		const int fade = 4;
-		cpumask_t mask;
 
 		/*
 		 * User wants to increase number of threads with for
@@ -557,8 +556,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
 		 * have too many threads no matter how many cores/HTs
 		 * there are.
 		 */
-		cpumask_copy(&mask, topology_thread_cpumask(0));
-		if (cpus_weight(mask) > 1) { /* weight is # of HTs */
+		/* weight is # of HTs */
+		if (cpumask_weight(topology_thread_cpumask(0)) > 1) {
			/* depress thread factor for hyper-thread */
			factor = factor - (factor >> 1) + (factor >> 3);
		}
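For reference, the depressed value in the hunk above works out to factor - (factor >> 1) + (factor >> 3), i.e. roughly 5/8 of the configured factor (a factor of 8 becomes 5), so the thread count is scaled back rather than inflated when the weight shows hyper-thread siblings.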
@@ -2752,7 +2751,6 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
 
 int ptlrpc_hr_init(void)
 {
-	cpumask_t mask;
 	struct ptlrpc_hr_partition *hrp;
 	struct ptlrpc_hr_thread *hrt;
 	int rc;
@@ -2770,8 +2768,7 @@ int ptlrpc_hr_init(void)
 
 	init_waitqueue_head(&ptlrpc_hr.hr_waitq);
 
-	cpumask_copy(&mask, topology_thread_cpumask(0));
-	weight = cpus_weight(mask);
+	weight = cpumask_weight(topology_thread_cpumask(0));
 
 	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
 		hrp->hrp_cpt = i;
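Taken together, the service.c hunks replace a copy-then-count sequence with a direct query of the topology mask. A minimal sketch of that pattern, with a hypothetical helper name and assuming a kernel of this era that still provides topology_thread_cpumask():

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical helper, sketch only: how many hardware threads share
 * core #0?  No local cpumask_t copy is needed just to take a
 * population count; the topology mask can be weighed in place. */
static inline int core0_sibling_count(void)
{
	/* Old (removed):  cpumask_t mask;
	 *                 cpumask_copy(&mask, topology_thread_cpumask(0));
	 *                 weight = cpus_weight(mask);
	 */
	return cpumask_weight(topology_thread_cpumask(0));
}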