x86: Replace cpumask_weight() with cpumask_empty() where appropriate
In some cases, x86 code calls cpumask_weight() to check if any bit of a given cpumask is set. This can be done more efficiently with cpumask_empty(), because cpumask_empty() stops traversing the cpumask as soon as it finds the first set bit, while cpumask_weight() counts all bits unconditionally. Signed-off-by: Yury Norov <yury.norov@gmail.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Steve Wahl <steve.wahl@hpe.com> Link: https://lore.kernel.org/r/20220210224933.379149-17-yury.norov@gmail.com
This commit is contained in:
parent
5a0893088a
commit
3a5ff1f6dd
|
@ -341,14 +341,14 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
|
|||
|
||||
/* Check whether cpus belong to parent ctrl group */
|
||||
cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
|
||||
if (cpumask_weight(tmpmask)) {
|
||||
if (!cpumask_empty(tmpmask)) {
|
||||
rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check whether cpus are dropped from this group */
|
||||
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
|
||||
if (cpumask_weight(tmpmask)) {
|
||||
if (!cpumask_empty(tmpmask)) {
|
||||
/* Give any dropped cpus to parent rdtgroup */
|
||||
cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
|
||||
update_closid_rmid(tmpmask, prgrp);
|
||||
|
@ -359,7 +359,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
|
|||
* and update per-cpu rmid
|
||||
*/
|
||||
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
|
||||
if (cpumask_weight(tmpmask)) {
|
||||
if (!cpumask_empty(tmpmask)) {
|
||||
head = &prgrp->mon.crdtgrp_list;
|
||||
list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
|
||||
if (crgrp == rdtgrp)
|
||||
|
@ -394,7 +394,7 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
|
|||
|
||||
/* Check whether cpus are dropped from this group */
|
||||
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
|
||||
if (cpumask_weight(tmpmask)) {
|
||||
if (!cpumask_empty(tmpmask)) {
|
||||
/* Can't drop from default group */
|
||||
if (rdtgrp == &rdtgroup_default) {
|
||||
rdt_last_cmd_puts("Can't drop CPUs from default group\n");
|
||||
|
@ -413,12 +413,12 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
|
|||
* and update per-cpu closid/rmid.
|
||||
*/
|
||||
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
|
||||
if (cpumask_weight(tmpmask)) {
|
||||
if (!cpumask_empty(tmpmask)) {
|
||||
list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
|
||||
if (r == rdtgrp)
|
||||
continue;
|
||||
cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
|
||||
if (cpumask_weight(tmpmask1))
|
||||
if (!cpumask_empty(tmpmask1))
|
||||
cpumask_rdtgrp_clear(r, tmpmask1);
|
||||
}
|
||||
update_closid_rmid(tmpmask, rdtgrp);
|
||||
|
@ -488,7 +488,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
|
|||
|
||||
/* check that user didn't specify any offline cpus */
|
||||
cpumask_andnot(tmpmask, newmask, cpu_online_mask);
|
||||
if (cpumask_weight(tmpmask)) {
|
||||
if (!cpumask_empty(tmpmask)) {
|
||||
ret = -EINVAL;
|
||||
rdt_last_cmd_puts("Can only assign online CPUs\n");
|
||||
goto unlock;
|
||||
|
|
|
@ -400,7 +400,7 @@ static void leave_uniprocessor(void)
|
|||
int cpu;
|
||||
int err;
|
||||
|
||||
if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
|
||||
if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
|
||||
return;
|
||||
pr_notice("Re-enabling CPUs...\n");
|
||||
for_each_cpu(cpu, downed_cpus) {
|
||||
|
|
|
@ -985,7 +985,7 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
|
|||
|
||||
/* Clear global flags */
|
||||
if (master) {
|
||||
if (cpumask_weight(uv_nmi_cpu_mask))
|
||||
if (!cpumask_empty(uv_nmi_cpu_mask))
|
||||
uv_nmi_cleanup_mask();
|
||||
atomic_set(&uv_nmi_cpus_in_nmi, -1);
|
||||
atomic_set(&uv_nmi_cpu, -1);
|
||||
|
|
Loading…
Reference in New Issue