btrfs: raid56: reduce indentation in lock_stripe_add
In lock_stripe_add() we're traversing the stripe hash list and checking whether the current list element's raid_map is equal to the raid bio's raid_map. If both are equal we continue processing. If we check for inequality instead of equality, we can reduce one level of indentation.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 1d2e7c7c3e
commit 9d6cb1b0f9
@@ -682,62 +682,59 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
 
 	spin_lock_irqsave(&h->lock, flags);
 	list_for_each_entry(cur, &h->hash_list, hash_list) {
-		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
-			spin_lock(&cur->bio_list_lock);
-
-			/* can we steal this cached rbio's pages? */
-			if (bio_list_empty(&cur->bio_list) &&
-			    list_empty(&cur->plug_list) &&
-			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
-			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
-				list_del_init(&cur->hash_list);
-				refcount_dec(&cur->refs);
-
-				steal_rbio(cur, rbio);
-				cache_drop = cur;
-				spin_unlock(&cur->bio_list_lock);
-
-				goto lockit;
-			}
-
-			/* can we merge into the lock owner? */
-			if (rbio_can_merge(cur, rbio)) {
-				merge_rbio(cur, rbio);
-				spin_unlock(&cur->bio_list_lock);
-				freeit = rbio;
-				ret = 1;
-				goto out;
-			}
-
-
-			/*
-			 * we couldn't merge with the running
-			 * rbio, see if we can merge with the
-			 * pending ones. We don't have to
-			 * check for rmw_locked because there
-			 * is no way they are inside finish_rmw
-			 * right now
-			 */
-			list_for_each_entry(pending, &cur->plug_list,
-					    plug_list) {
-				if (rbio_can_merge(pending, rbio)) {
-					merge_rbio(pending, rbio);
-					spin_unlock(&cur->bio_list_lock);
-					freeit = rbio;
-					ret = 1;
-					goto out;
-				}
-			}
-
-			/* no merging, put us on the tail of the plug list,
-			 * our rbio will be started with the currently
-			 * running rbio unlocks
-			 */
-			list_add_tail(&rbio->plug_list, &cur->plug_list);
-			spin_unlock(&cur->bio_list_lock);
-			ret = 1;
-			goto out;
-		}
+		if (cur->bbio->raid_map[0] != rbio->bbio->raid_map[0])
+			continue;
+
+		spin_lock(&cur->bio_list_lock);
+
+		/* Can we steal this cached rbio's pages? */
+		if (bio_list_empty(&cur->bio_list) &&
+		    list_empty(&cur->plug_list) &&
+		    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
+		    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
+			list_del_init(&cur->hash_list);
+			refcount_dec(&cur->refs);
+
+			steal_rbio(cur, rbio);
+			cache_drop = cur;
+			spin_unlock(&cur->bio_list_lock);
+
+			goto lockit;
+		}
+
+		/* Can we merge into the lock owner? */
+		if (rbio_can_merge(cur, rbio)) {
+			merge_rbio(cur, rbio);
+			spin_unlock(&cur->bio_list_lock);
+			freeit = rbio;
+			ret = 1;
+			goto out;
+		}
+
+
+		/*
+		 * We couldn't merge with the running rbio, see if we can merge
+		 * with the pending ones. We don't have to check for rmw_locked
+		 * because there is no way they are inside finish_rmw right now
+		 */
+		list_for_each_entry(pending, &cur->plug_list, plug_list) {
+			if (rbio_can_merge(pending, rbio)) {
+				merge_rbio(pending, rbio);
+				spin_unlock(&cur->bio_list_lock);
+				freeit = rbio;
+				ret = 1;
+				goto out;
+			}
+		}
+
+		/*
+		 * No merging, put us on the tail of the plug list, our rbio
+		 * will be started with the currently running rbio unlocks
+		 */
+		list_add_tail(&rbio->plug_list, &cur->plug_list);
+		spin_unlock(&cur->bio_list_lock);
+		ret = 1;
+		goto out;
 	}
 lockit:
 	refcount_inc(&rbio->refs);
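As an aside for readers new to this refactoring style, here is a minimal standalone sketch of the same invert-and-continue pattern the patch applies; the struct, function, and variable names below are invented for illustration and are not part of the btrfs code.

#include <stdio.h>

struct item {
	int key;
	int value;
};

/* Nested variant: all of the work sits inside the match check. */
static int find_nested(const struct item *items, int n, int key)
{
	for (int i = 0; i < n; i++) {
		if (items[i].key == key) {
			/* real processing would happen here, one level deep */
			return items[i].value;
		}
	}
	return -1;
}

/* Inverted variant: skip non-matches early, keeping the body flat. */
static int find_flat(const struct item *items, int n, int key)
{
	for (int i = 0; i < n; i++) {
		if (items[i].key != key)
			continue;
		/* same processing, one indentation level less */
		return items[i].value;
	}
	return -1;
}

int main(void)
{
	struct item items[] = { { 1, 10 }, { 2, 20 }, { 3, 30 } };

	printf("%d\n", find_nested(items, 3, 2));	/* prints 20 */
	printf("%d\n", find_flat(items, 3, 2));		/* prints 20 */
	return 0;
}

Both variants behave identically; the flat one dismisses non-matching entries up front, which is exactly what the patch does with the raid_map check so the rest of the loop body can lose a level of indentation.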