all: replace find_next{,_zero}_bit with find_first{,_zero}_bit where appropriate

find_first{,_zero}_bit is a more efficient analogue of the 'next' version
when start == 0. This patch replaces 'next' with 'first' where the
conversion is trivial.
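
Illustratively, every hunk in this patch applies the same mechanical pattern.
A minimal sketch follows; the `mask` array and `NBITS` size are placeholder
names, not taken from any file touched below:

	#include <linux/bitops.h>	/* declares find_first_bit() and find_next_bit() */

	#define NBITS 64		/* hypothetical bitmap size, in bits */

	static unsigned long first_set(const unsigned long *mask)
	{
		/* Before: generic 'next' helper asked to start scanning at bit 0 */
		/* return find_next_bit(mask, NBITS, 0); */

		/* After: dedicated 'first' helper, no redundant start argument */
		return find_first_bit(mask, NBITS);
	}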

Signed-off-by: Yury Norov <yury.norov@gmail.com>
Tested-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
Author: Yury Norov <yury.norov@gmail.com>
Date:   2021-08-14 14:17:03 -07:00
Commit: b5c7e7ec7d
Parent: 93ba139ba8

21 changed files with 47 additions and 48 deletions

View File

@@ -375,7 +375,7 @@ int pasemi_dma_alloc_flag(void)
 	int bit;
 retry:
-	bit = find_next_bit(flags_free, MAX_FLAGS, 0);
+	bit = find_first_bit(flags_free, MAX_FLAGS);
 	if (bit >= MAX_FLAGS)
 		return -ENOSPC;
 	if (!test_and_clear_bit(bit, flags_free))
@@ -440,7 +440,7 @@ int pasemi_dma_alloc_fun(void)
 	int bit;
 retry:
-	bit = find_next_bit(fun_free, MAX_FLAGS, 0);
+	bit = find_first_bit(fun_free, MAX_FLAGS);
 	if (bit >= MAX_FLAGS)
 		return -ENOSPC;
 	if (!test_and_clear_bit(bit, fun_free))

View File

@@ -2021,7 +2021,7 @@ static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
 	while ((slotidx > 0) && (ofs >= ms->npages)) {
 		slotidx--;
 		ms = slots->memslots + slotidx;
-		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
+		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
 	}
 	return ms->base_gfn + ofs;
 }

View File

@@ -196,7 +196,7 @@ rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
 		return per_cpu_ptr(sess->cpu_queues, bit);
 	} else if (cpu != 0) {
 		/* Search from 0 to cpu */
-		bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
+		bit = find_first_bit(sess->cpu_queues_bm, cpu);
 		if (bit < cpu)
 			return per_cpu_ptr(sess->cpu_queues, bit);
 	}

View File

@@ -1681,7 +1681,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
 			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
 			emr = val;
-			for (i = find_next_bit(&emr, 32, 0); i < 32;
+			for (i = find_first_bit(&emr, 32); i < 32;
 			     i = find_next_bit(&emr, 32, i + 1)) {
 				int k = (j << 5) + i;

View File

@@ -347,7 +347,7 @@ static int ad7124_find_free_config_slot(struct ad7124_state *st)
 {
 	unsigned int free_cfg_slot;
-	free_cfg_slot = find_next_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS, 0);
+	free_cfg_slot = find_first_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS);
 	if (free_cfg_slot == AD7124_MAX_CONFIGS)
 		return -1;

View File

@ -1709,14 +1709,14 @@ clean_msixtbl:
*/ */
static void irdma_get_used_rsrc(struct irdma_device *iwdev) static void irdma_get_used_rsrc(struct irdma_device *iwdev)
{ {
iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds, iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
iwdev->rf->max_pd, 0); iwdev->rf->max_pd);
iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps, iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp, 0); iwdev->rf->max_qp);
iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs, iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
iwdev->rf->max_cq, 0); iwdev->rf->max_cq);
iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs, iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr, 0); iwdev->rf->max_mr);
} }
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)

View File

@@ -106,7 +106,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
 	/* Part 1: Find a free minor number */
 	mutex_lock(&cec_devnode_lock);
-	minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
+	minor = find_first_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES);
 	if (minor == CEC_NUM_DEVICES) {
 		mutex_unlock(&cec_devnode_lock);
 		pr_err("could not get a free minor\n");

View File

@@ -217,7 +217,7 @@ int __must_check media_devnode_register(struct media_device *mdev,
 	/* Part 1: Find a free minor number */
 	mutex_lock(&media_devnode_lock);
-	minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
+	minor = find_first_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES);
 	if (minor == MEDIA_NUM_DEVICES) {
 		mutex_unlock(&media_devnode_lock);
 		pr_err("could not get a free minor\n");

View File

@@ -213,7 +213,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
 	if (!val)
 		return 0;
-	pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
+	pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
 	while (pos != MAX_MSI_IRQS_PER_CTRL) {
 		generic_handle_domain_irq(pp->irq_domain,
 					  (index * MAX_MSI_IRQS_PER_CTRL) + pos);

View File

@@ -17990,8 +17990,8 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
 	 * the driver starts at 0 each time.
 	 */
 	spin_lock_irq(&phba->hbalock);
-	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
-				 phba->sli4_hba.max_cfg_param.max_xri, 0);
+	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
+				  phba->sli4_hba.max_cfg_param.max_xri);
 	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
 		spin_unlock_irq(&phba->hbalock);
 		return NO_XRI;
@@ -19668,7 +19668,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
 	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
 	rpi_limit = phba->sli4_hba.next_rpi;
-	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+	rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
 	if (rpi >= rpi_limit)
 		rpi = LPFC_RPI_ALLOC_ERROR;
 	else {
@@ -20311,8 +20311,8 @@ next_priority:
 		 * have been tested so that we can detect when we should
 		 * change the priority level.
 		 */
-		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
-					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+		next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+						LPFC_SLI4_FCF_TBL_INDX_MAX);
 	}

View File

@@ -358,8 +358,8 @@ struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
 		goto out;
 	if (flags & K3_RINGACC_RING_USE_PROXY) {
-		proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
-					      ringacc->num_proxies, 0);
+		proxy_id = find_first_zero_bit(ringacc->proxy_inuse,
+					       ringacc->num_proxies);
 		if (proxy_id == ringacc->num_proxies)
 			goto error;
 	}

View File

@@ -1975,7 +1975,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
 	more = n - (size - tail);
 	if (eol == N_TTY_BUF_SIZE && more) {
 		/* scan wrapped without finding set bit */
-		eol = find_next_bit(ldata->read_flags, more, 0);
+		eol = find_first_bit(ldata->read_flags, more);
 		found = eol != more;
 	} else
 		found = eol != size;

View File

@@ -246,8 +246,7 @@ void acrn_ioreq_request_clear(struct acrn_vm *vm)
 	spin_lock_bh(&vm->ioreq_clients_lock);
 	client = vm->default_client;
 	if (client) {
-		vcpu = find_next_bit(client->ioreqs_map,
-				     ACRN_IO_REQUEST_MAX, 0);
+		vcpu = find_first_bit(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
 		while (vcpu < ACRN_IO_REQUEST_MAX) {
 			acrn_ioreq_complete_request(client, vcpu, NULL);
 			vcpu = find_next_bit(client->ioreqs_map,

View File

@@ -2558,8 +2558,8 @@ find_other_zone:
 	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
 	if (secno >= MAIN_SECS(sbi)) {
 		if (dir == ALLOC_RIGHT) {
-			secno = find_next_zero_bit(free_i->free_secmap,
-							MAIN_SECS(sbi), 0);
+			secno = find_first_zero_bit(free_i->free_secmap,
+							MAIN_SECS(sbi));
 			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
 		} else {
 			go_left = 1;
@@ -2574,8 +2574,8 @@ find_other_zone:
 			left_start--;
 			continue;
 		}
-		left_start = find_next_zero_bit(free_i->free_secmap,
-							MAIN_SECS(sbi), 0);
+		left_start = find_first_zero_bit(free_i->free_secmap,
+							MAIN_SECS(sbi));
 		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
 		break;
 	}

View File

@@ -379,7 +379,7 @@ static void o2hb_nego_timeout(struct work_struct *work)
 	o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
 	/* lowest node as master node to make negotiate decision. */
-	master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
+	master_node = find_first_bit(live_node_bitmap, O2NM_MAX_NODES);
 	if (master_node == o2nm_this_node()) {
 		if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {

View File

@@ -1045,7 +1045,7 @@ static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map)
 	int status, ret = 0, i;
 	char *p;
-	if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
+	if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
 		goto bail;
 	qr = kzalloc(sizeof(struct dlm_query_region), GFP_KERNEL);
@@ -1217,7 +1217,7 @@ static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map)
 	struct o2nm_node *node;
 	int ret = 0, status, count, i;
-	if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
+	if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
 		goto bail;
 	qn = kzalloc(sizeof(struct dlm_query_nodeinfo), GFP_KERNEL);

View File

@@ -861,7 +861,7 @@ lookup:
 	 * to see if there are any nodes that still need to be
 	 * considered. these will not appear in the mle nodemap
 	 * but they might own this lockres. wait on them. */
-	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
+	bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
 	if (bit < O2NM_MAX_NODES) {
 		mlog(0, "%s: res %.*s, At least one node (%d) "
 		     "to recover before lock mastery can begin\n",
@@ -912,7 +912,7 @@ redo_request:
 		dlm_wait_for_recovery(dlm);
 		spin_lock(&dlm->spinlock);
-		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
+		bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
 		if (bit < O2NM_MAX_NODES) {
 			mlog(0, "%s: res %.*s, At least one node (%d) "
 			     "to recover before lock mastery can begin\n",
@@ -1079,7 +1079,7 @@ recheck:
 		sleep = 1;
 		/* have all nodes responded? */
 		if (voting_done && !*blocked) {
-			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
+			bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
 			if (dlm->node_num <= bit) {
 				/* my node number is lowest.
 				 * now tell other nodes that I am
@@ -1234,8 +1234,8 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
 		} else {
 			mlog(ML_ERROR, "node down! %d\n", node);
 			if (blocked) {
-				int lowest = find_next_bit(mle->maybe_map,
-						       O2NM_MAX_NODES, 0);
+				int lowest = find_first_bit(mle->maybe_map,
+						       O2NM_MAX_NODES);
 				/* act like it was never there */
 				clear_bit(node, mle->maybe_map);
@@ -1795,7 +1795,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
 			     "MLE for it! (%.*s)\n", assert->node_idx,
 			     namelen, name);
 		} else {
-			int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
+			int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
 			if (bit >= O2NM_MAX_NODES) {
 				/* not necessarily an error, though less likely.
 				 * could be master just re-asserting. */
@@ -2521,7 +2521,7 @@ static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm,
 	}
 	if (!nonlocal) {
-		node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+		node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES);
 		if (node_ref >= O2NM_MAX_NODES)
 			return 0;
 	}
@@ -3303,7 +3303,7 @@ static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
 	BUG_ON(mle->type != DLM_MLE_BLOCK);
 	spin_lock(&mle->spinlock);
-	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
+	bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
 	if (bit != dead_node) {
 		mlog(0, "mle found, but dead node %u would not have been "
 		     "master\n", dead_node);
@@ -3542,7 +3542,7 @@ void dlm_force_free_mles(struct dlm_ctxt *dlm)
 	spin_lock(&dlm->master_lock);
 	BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
-	BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
+	BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES));
 	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
 		bucket = dlm_master_hash(dlm, i);

View File

@@ -451,7 +451,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
 	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
 		int bit;
-		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
+		bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
 		if (bit >= O2NM_MAX_NODES || bit < 0)
 			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
 		else

View File

@@ -92,7 +92,7 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
 		return 0;
 	/* Another node has this resource with this node as the master */
-	bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+	bit = find_first_bit(res->refmap, O2NM_MAX_NODES);
 	if (bit < O2NM_MAX_NODES)
 		return 0;

View File

@@ -251,7 +251,7 @@ void gen_pool_destroy(struct gen_pool *pool)
 		list_del(&chunk->next_chunk);
 		end_bit = chunk_size(chunk) >> order;
-		bit = find_next_bit(chunk->bits, end_bit, 0);
+		bit = find_first_bit(chunk->bits, end_bit);
 		BUG_ON(bit < end_bit);
 		vfree(chunk);

View File

@@ -608,7 +608,7 @@ static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 	bitmap = &ncf->bitmap;
 	spin_lock_irqsave(&nc->lock, flags);
-	index = find_next_bit(bitmap, ncf->n_vids, 0);
+	index = find_first_bit(bitmap, ncf->n_vids);
 	if (index >= ncf->n_vids) {
 		spin_unlock_irqrestore(&nc->lock, flags);
 		return -1;
@@ -667,7 +667,7 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 		return -1;
 	}
-	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
+	index = find_first_zero_bit(bitmap, ncf->n_vids);
 	if (index < 0 || index >= ncf->n_vids) {
 		netdev_err(ndp->ndev.dev,
 			   "Channel %u already has all VLAN filters set\n",