Btrfs: reduce calls to wake_up on uncontended locks
The btrfs locks were unconditionally calling wake_up as the locks were released. This led to extra thrashing on the waitqueue, especially for locks that were dominated by readers. Signed-off-by: Chris Mason <chris.mason@fusionio.com>
This commit is contained in:
parent
e39e64ac0c
commit
cbea5ac1ee
|
@ -78,13 +78,15 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
|
||||||
write_lock(&eb->lock);
|
write_lock(&eb->lock);
|
||||||
WARN_ON(atomic_read(&eb->spinning_writers));
|
WARN_ON(atomic_read(&eb->spinning_writers));
|
||||||
atomic_inc(&eb->spinning_writers);
|
atomic_inc(&eb->spinning_writers);
|
||||||
if (atomic_dec_and_test(&eb->blocking_writers))
|
if (atomic_dec_and_test(&eb->blocking_writers) &&
|
||||||
|
waitqueue_active(&eb->write_lock_wq))
|
||||||
wake_up(&eb->write_lock_wq);
|
wake_up(&eb->write_lock_wq);
|
||||||
} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
|
} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
|
||||||
BUG_ON(atomic_read(&eb->blocking_readers) == 0);
|
BUG_ON(atomic_read(&eb->blocking_readers) == 0);
|
||||||
read_lock(&eb->lock);
|
read_lock(&eb->lock);
|
||||||
atomic_inc(&eb->spinning_readers);
|
atomic_inc(&eb->spinning_readers);
|
||||||
if (atomic_dec_and_test(&eb->blocking_readers))
|
if (atomic_dec_and_test(&eb->blocking_readers) &&
|
||||||
|
waitqueue_active(&eb->read_lock_wq))
|
||||||
wake_up(&eb->read_lock_wq);
|
wake_up(&eb->read_lock_wq);
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
|
@ -199,7 +201,8 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
|
||||||
}
|
}
|
||||||
btrfs_assert_tree_read_locked(eb);
|
btrfs_assert_tree_read_locked(eb);
|
||||||
WARN_ON(atomic_read(&eb->blocking_readers) == 0);
|
WARN_ON(atomic_read(&eb->blocking_readers) == 0);
|
||||||
if (atomic_dec_and_test(&eb->blocking_readers))
|
if (atomic_dec_and_test(&eb->blocking_readers) &&
|
||||||
|
waitqueue_active(&eb->read_lock_wq))
|
||||||
wake_up(&eb->read_lock_wq);
|
wake_up(&eb->read_lock_wq);
|
||||||
atomic_dec(&eb->read_locks);
|
atomic_dec(&eb->read_locks);
|
||||||
}
|
}
|
||||||
|
@ -247,8 +250,9 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
|
||||||
if (blockers) {
|
if (blockers) {
|
||||||
WARN_ON(atomic_read(&eb->spinning_writers));
|
WARN_ON(atomic_read(&eb->spinning_writers));
|
||||||
atomic_dec(&eb->blocking_writers);
|
atomic_dec(&eb->blocking_writers);
|
||||||
smp_wmb();
|
smp_mb();
|
||||||
wake_up(&eb->write_lock_wq);
|
if (waitqueue_active(&eb->write_lock_wq))
|
||||||
|
wake_up(&eb->write_lock_wq);
|
||||||
} else {
|
} else {
|
||||||
WARN_ON(atomic_read(&eb->spinning_writers) != 1);
|
WARN_ON(atomic_read(&eb->spinning_writers) != 1);
|
||||||
atomic_dec(&eb->spinning_writers);
|
atomic_dec(&eb->spinning_writers);
|
||||||
|
|
Loading…
Reference in New Issue