From 730d9ec36bf27e87805aee6ebf462071735e31f9 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Sat, 10 Oct 2015 18:16:38 +0200
Subject: [PATCH 1/6] btrfs: remove waitqueue_active check from btrfs_rm_dev_replace_unblocked

Normally the waitqueue_active check would need a barrier, but this is
not necessary here because it's not a performance sensitive context and
we can call wake_up directly.

Suggested-by: Chris Mason
Signed-off-by: David Sterba
---
 fs/btrfs/dev-replace.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index e54dd5905cee..733ff75b620e 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -454,8 +454,7 @@ static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
 static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info)
 {
 	clear_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
-	if (waitqueue_active(&fs_info->replace_wait))
-		wake_up(&fs_info->replace_wait);
+	wake_up(&fs_info->replace_wait);
 }
 
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,

From b666a9cd993c6a49c4bf23a1ed476359c978f60b Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Sat, 10 Oct 2015 18:24:48 +0200
Subject: [PATCH 2/6] btrfs: add barrier for waitqueue_active in clear_btree_io_tree

waitqueue_active should be preceded by a barrier; in this function we
don't need to issue it every time, a single one before the loop is
enough.

Signed-off-by: David Sterba
---
 fs/btrfs/transaction.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 74bc3338418b..e8e5b5a10719 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -82,6 +82,12 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
 static void clear_btree_io_tree(struct extent_io_tree *tree)
 {
 	spin_lock(&tree->lock);
+	/*
+	 * Do a single barrier for the waitqueue_active check here, the state
+	 * of the waitqueue should not change once clear_btree_io_tree is
+	 * called.
+	 */
+	smp_mb();
 	while (!RB_EMPTY_ROOT(&tree->state)) {
 		struct rb_node *node;
 		struct extent_state *state;
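
For context when reading the rest of the series: every call site touched here
is a waker that updates some condition (a counter, a bit, a state flag) and
then checks waitqueue_active() to skip wake_up() when nobody is waiting.
Without a full barrier between the condition update and the waitqueue_active()
check, the waker can observe an empty wait queue before the waiter has queued
itself, while the waiter still observes the stale condition, and the wakeup is
lost. A minimal sketch of the pairing, not taken from these patches, with
illustrative names only:

	/* sketch; assumes <linux/wait.h> and <linux/sched.h> */
	static DECLARE_WAIT_QUEUE_HEAD(wq);	/* illustrative */
	static bool done;

	static void waiter(void)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
			if (done)
				break;
			schedule();
		}
		finish_wait(&wq, &wait);
	}

	static void waker(void)
	{
		done = true;
		/*
		 * Pairs with the barrier implied by prepare_to_wait(); without
		 * it the store to 'done' and the load in waitqueue_active()
		 * could be reordered and the wakeup lost.
		 */
		smp_mb();
		if (waitqueue_active(&wq))
			wake_up(&wq);
	}

Patch 1 sidesteps the problem by calling wake_up() unconditionally (wake_up()
takes the waitqueue lock itself), patch 2 issues the barrier once up front,
and the rest of the series documents or removes barriers that are implied by
locks or by value-returning atomics.
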
From 33a9eca7e4a4c2c17aebbb11f0e506a48ebc30c9 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Sat, 10 Oct 2015 18:35:10 +0200
Subject: [PATCH 3/6] btrfs: comment waitqueue_active implied by locks

Suggested-by: Chris Mason
Signed-off-by: David Sterba
---
 fs/btrfs/raid56.c   | 6 +++++-
 fs/btrfs/tree-log.c | 6 ++++++
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index fcf7265ca46f..1a33d3eb36de 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -810,7 +810,11 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
 			}
 
 			goto done_nolock;
-		} else if (waitqueue_active(&h->wait)) {
+			/*
+			 * The barrier for this waitqueue_active is not needed,
+			 * we're protected by h->lock and can't miss a wakeup.
+			 */
+		} else if (waitqueue_active(&h->wait)) {
 			spin_unlock(&rbio->bio_list_lock);
 			spin_unlock_irqrestore(&h->lock, flags);
 			wake_up(&h->wait);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1bbaace73383..d0deb4643502 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2950,6 +2950,9 @@ out_wake_log_root:
 	atomic_set(&log_root_tree->log_commit[index2], 0);
 	mutex_unlock(&log_root_tree->log_mutex);
 
+	/*
+	 * The barrier before waitqueue_active is implied by mutex_unlock
+	 */
 	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
 		wake_up(&log_root_tree->log_commit_wait[index2]);
 out:
@@ -2961,6 +2964,9 @@ out:
 	atomic_set(&root->log_commit[index1], 0);
 	mutex_unlock(&root->log_mutex);
 
+	/*
+	 * The barrier before waitqueue_active is implied by mutex_unlock
+	 */
 	if (waitqueue_active(&root->log_commit_wait[index1]))
 		wake_up(&root->log_commit_wait[index1]);
 	return ret;

From a83342aa0c8f0ca90057d3837ae8d198186e5153 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Mon, 16 Feb 2015 19:36:47 +0100
Subject: [PATCH 4/6] btrfs: add comments to barriers before waitqueue_active

Reduce number of undocumented barriers out there.

Signed-off-by: David Sterba
---
 fs/btrfs/compression.c  | 3 +++
 fs/btrfs/extent-tree.c  | 3 +--
 fs/btrfs/locking.c      | 3 +++
 fs/btrfs/ordered-data.c | 6 ++++++
 fs/btrfs/transaction.c  | 3 +++
 5 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 57ee8ca29b06..3a9317ce67f8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -839,6 +839,9 @@ static void free_workspace(int type, struct list_head *workspace)
 	btrfs_compress_op[idx]->free_workspace(workspace);
 	atomic_dec(alloc_workspace);
 wake:
+	/*
+	 * Make sure counter is updated before we wake up waiters.
+	 */
 	smp_mb();
 	if (waitqueue_active(workspace_wait))
 		wake_up(workspace_wait);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9f9604201333..59eb92f65c62 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -10363,8 +10363,7 @@ void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
 {
 	percpu_counter_dec(&root->subv_writers->counter);
 	/*
-	 * Make sure counter is updated before we wake up
-	 * waiters.
+	 * Make sure counter is updated before we wake up waiters.
 	 */
 	smp_mb();
 	if (waitqueue_active(&root->subv_writers->wait))
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index d7e6baf1b205..03f8630dbaf2 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -280,6 +280,9 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 	if (blockers) {
 		WARN_ON(atomic_read(&eb->spinning_writers));
 		atomic_dec(&eb->blocking_writers);
+		/*
+		 * Make sure counter is updated before we wake up waiters.
+		 */
 		smp_mb();
 		if (waitqueue_active(&eb->write_lock_wq))
 			wake_up(&eb->write_lock_wq);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 52170cf1757e..071005f008c1 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -345,6 +345,9 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 
 	if (entry->bytes_left == 0) {
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+		/*
+		 * Implicit memory barrier after test_and_set_bit
+		 */
 		if (waitqueue_active(&entry->wait))
 			wake_up(&entry->wait);
 	} else {
@@ -409,6 +412,9 @@ have_entry:
 
 	if (entry->bytes_left == 0) {
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+		/*
+		 * Implicit memory barrier after test_and_set_bit
+		 */
 		if (waitqueue_active(&entry->wait))
 			wake_up(&entry->wait);
 	} else {
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e8e5b5a10719..3fd70f797b7d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -861,6 +861,9 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	atomic_dec(&cur_trans->num_writers);
 	extwriter_counter_dec(cur_trans, trans->type);
 
+	/*
+	 * Make sure counter is updated before we wake up waiters.
+	 */
 	smp_mb();
 	if (waitqueue_active(&cur_trans->writer_wait))
 		wake_up(&cur_trans->writer_wait);
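
The split between the patch above and the next one follows the kernel memory
model rule (see Documentation/memory-barriers.txt and
Documentation/atomic_ops.txt): void atomics such as atomic_dec() imply no
ordering, so the explicit smp_mb() before waitqueue_active() stays and only
gains a comment, while value-returning atomic RMW operations such as
atomic_dec_and_test() and test_and_set_bit() act as full barriers, so an extra
smp_mb() after them is redundant. A condensed sketch of the two cases,
distilled from the hunks above and below rather than copied verbatim:

	/* void atomic: no ordering implied, keep the explicit barrier */
	atomic_dec(&cur_trans->num_writers);
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);

	/* value-returning atomic: already a full barrier, smp_mb() can go */
	if (atomic_dec_and_test(&root->log_writers)) {
		/*
		 * Implicit memory barrier after atomic_dec_and_test
		 */
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
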
From 779adf0f647651f5a45eeee3442c881300ce989e Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Mon, 16 Feb 2015 19:39:00 +0100
Subject: [PATCH 5/6] btrfs: remove extra barrier before waitqueue_active

Removing barriers is scary, but a call to atomic_dec_and_test implies
a barrier, so we don't need to issue another one.

Signed-off-by: David Sterba
---
 fs/btrfs/tree-log.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d0deb4643502..63275594debd 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -229,7 +229,9 @@ int btrfs_pin_log_trans(struct btrfs_root *root)
 void btrfs_end_log_trans(struct btrfs_root *root)
 {
 	if (atomic_dec_and_test(&root->log_writers)) {
-		smp_mb();
+		/*
+		 * Implicit memory barrier after atomic_dec_and_test
+		 */
 		if (waitqueue_active(&root->log_writer_wait))
 			wake_up(&root->log_writer_wait);
 	}
@@ -2820,7 +2822,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
 	mutex_lock(&log_root_tree->log_mutex);
 	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
-		smp_mb();
+		/*
+		 * Implicit memory barrier after atomic_dec_and_test
+		 */
 		if (waitqueue_active(&log_root_tree->log_writer_wait))
 			wake_up(&log_root_tree->log_writer_wait);
 	}
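
The last patch covers the remaining pattern, where the value-returning atomic
sits on the left-hand side of an && together with waitqueue_active(). Since
&& evaluates its left operand first and the atomic is a full barrier, the
counter update is ordered before the waitqueue check without any explicit
smp_mb(); the added comments only make that visible. Illustrative shape of
the pattern, using names from the hunks below:

	/*
	 * atomic_dec_return() is a full barrier and && evaluates it before
	 * waitqueue_active(), so no explicit smp_mb() is needed between the
	 * counter update and the waiter check.
	 */
	if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);
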
From ee86395458072760d62e66aad10a5e9e8902b8cf Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Mon, 16 Feb 2015 19:41:40 +0100
Subject: [PATCH 6/6] btrfs: comment the rest of implicit barriers before waitqueue_active

There are atomic operations that imply the barrier for waitqueue_active
but are mixed into an if-condition; comment them so the implicit
barrier is not overlooked.

Signed-off-by: David Sterba
---
 fs/btrfs/delayed-inode.c | 4 ++++
 fs/btrfs/disk-io.c       | 3 +++
 fs/btrfs/inode.c         | 3 +++
 fs/btrfs/locking.c       | 9 +++++++++
 fs/btrfs/volumes.c       | 3 +++
 5 files changed, 22 insertions(+)

diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index a2ae42720a6a..e0941fbb913c 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -463,6 +463,10 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
 {
 	int seq = atomic_inc_return(&delayed_root->items_seq);
+
+	/*
+	 * atomic_dec_return implies a barrier for waitqueue_active
+	 */
 	if ((atomic_dec_return(&delayed_root->items) <
 	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
 	    waitqueue_active(&delayed_root->wait))
 		wake_up(&delayed_root->wait);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 295795aebe0b..379526ffd84d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -802,6 +802,9 @@ static void run_one_async_done(struct btrfs_work *work)
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
+	/*
+	 * atomic_dec_return implies a barrier for waitqueue_active
+	 */
 	if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
 	    waitqueue_active(&fs_info->async_submit_wait))
 		wake_up(&fs_info->async_submit_wait);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 611b66d73e80..7be4abe25e06 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1096,6 +1096,9 @@ static noinline void async_cow_submit(struct btrfs_work *work)
 	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
 		PAGE_CACHE_SHIFT;
 
+	/*
+	 * atomic_sub_return implies a barrier for waitqueue_active
+	 */
 	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
 	    5 * 1024 * 1024 &&
 	    waitqueue_active(&root->fs_info->async_submit_wait))
 		wake_up(&root->fs_info->async_submit_wait);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 03f8630dbaf2..8077461fc56a 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -79,6 +79,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		write_lock(&eb->lock);
 		WARN_ON(atomic_read(&eb->spinning_writers));
 		atomic_inc(&eb->spinning_writers);
+		/*
+		 * atomic_dec_and_test implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_and_test(&eb->blocking_writers) &&
 		    waitqueue_active(&eb->write_lock_wq))
 			wake_up(&eb->write_lock_wq);
@@ -86,6 +89,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
 		read_lock(&eb->lock);
 		atomic_inc(&eb->spinning_readers);
+		/*
+		 * atomic_dec_and_test implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_and_test(&eb->blocking_readers) &&
 		    waitqueue_active(&eb->read_lock_wq))
 			wake_up(&eb->read_lock_wq);
@@ -229,6 +235,9 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
+	/*
+	 * atomic_dec_and_test implies a barrier for waitqueue_active
+	 */
 	if (atomic_dec_and_test(&eb->blocking_readers) &&
 	    waitqueue_active(&eb->read_lock_wq))
 		wake_up(&eb->read_lock_wq);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6fc735869c18..ff3527192409 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -345,6 +345,9 @@ loop_lock:
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
 
+		/*
+		 * atomic_dec_return implies a barrier for waitqueue_active
+		 */
 		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
 		    waitqueue_active(&fs_info->async_submit_wait))
 			wake_up(&fs_info->async_submit_wait);
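
For completeness, the waiter side that these wakers pair with is the usual
wait_event() pattern: the task is added to the queue and the condition is
re-checked before sleeping, which together with the waker-side barrier
(explicit or implied) closes the missed-wakeup window. Roughly what the
btrfs transaction commit path does (illustrative excerpt, not part of this
series):

	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);
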