btrfs: convert btrfs_delayed_ref_node.refs from atomic_t to refcount_t
The refcount_t type and its corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows us to avoid accidental refcounter overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>
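Below is a minimal sketch (not part of this patch) of the conversion pattern the diff applies throughout: refcount_set() replaces atomic_set(), refcount_inc() replaces atomic_inc(), and refcount_dec_and_test() replaces atomic_dec_and_test(). The demo_ref struct and its helpers are hypothetical; only the refcount_t API calls mirror the real change.

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct demo_ref {
		refcount_t refs;	/* was: atomic_t refs */
	};

	static void demo_ref_init(struct demo_ref *r)
	{
		refcount_set(&r->refs, 1);	/* was: atomic_set(&r->refs, 1) */
	}

	static void demo_ref_get(struct demo_ref *r)
	{
		/* was: atomic_inc(); refcount_inc() saturates and WARNs on overflow */
		refcount_inc(&r->refs);
	}

	static void demo_ref_put(struct demo_ref *r)
	{
		/* was: atomic_dec_and_test(); WARNs on underflow instead of wrapping */
		if (refcount_dec_and_test(&r->refs))
			kfree(r);
	}

Unlike atomic_t, a refcount_t that would overflow sticks at a saturation value instead of wrapping to zero, so a leaked reference cannot silently turn into a premature free.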
commit 6df8cdf5bd
parent 1e4f4714d5
@@ -1286,7 +1286,7 @@ again:
 	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 	if (head) {
 		if (!mutex_trylock(&head->mutex)) {
-			atomic_inc(&head->node.refs);
+			refcount_inc(&head->node.refs);
 			spin_unlock(&delayed_refs->lock);

 			btrfs_release_path(path);
@@ -164,7 +164,7 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 	if (mutex_trylock(&head->mutex))
 		return 0;

-	atomic_inc(&head->node.refs);
+	refcount_inc(&head->node.refs);
 	spin_unlock(&delayed_refs->lock);

 	mutex_lock(&head->mutex);
@@ -590,7 +590,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	delayed_refs = &trans->transaction->delayed_refs;

 	/* first set the basic ref node struct up */
-	atomic_set(&ref->refs, 1);
+	refcount_set(&ref->refs, 1);
 	ref->bytenr = bytenr;
 	ref->num_bytes = num_bytes;
 	ref->ref_mod = count_mod;
@@ -682,7 +682,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	delayed_refs = &trans->transaction->delayed_refs;

 	/* first set the basic ref node struct up */
-	atomic_set(&ref->refs, 1);
+	refcount_set(&ref->refs, 1);
 	ref->bytenr = bytenr;
 	ref->num_bytes = num_bytes;
 	ref->ref_mod = 1;
@@ -739,7 +739,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 	seq = atomic64_read(&fs_info->tree_mod_seq);

 	/* first set the basic ref node struct up */
-	atomic_set(&ref->refs, 1);
+	refcount_set(&ref->refs, 1);
 	ref->bytenr = bytenr;
 	ref->num_bytes = num_bytes;
 	ref->ref_mod = 1;
@@ -18,6 +18,8 @@
 #ifndef __DELAYED_REF__
 #define __DELAYED_REF__

+#include <linux/refcount.h>
+
 /* these are the possible values of struct btrfs_delayed_ref_node->action */
 #define BTRFS_ADD_DELAYED_REF 1 /* add one backref to the tree */
 #define BTRFS_DROP_DELAYED_REF 2 /* delete one backref from the tree */
@@ -53,7 +55,7 @@ struct btrfs_delayed_ref_node {
 	u64 seq;

 	/* ref count on this data structure */
-	atomic_t refs;
+	refcount_t refs;

 	/*
 	 * how many refs is this entry adding or deleting. For
@@ -220,8 +222,8 @@ btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)

 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
 {
-	WARN_ON(atomic_read(&ref->refs) == 0);
-	if (atomic_dec_and_test(&ref->refs)) {
+	WARN_ON(refcount_read(&ref->refs) == 0);
+	if (refcount_dec_and_test(&ref->refs)) {
 		WARN_ON(ref->in_tree);
 		switch (ref->type) {
 		case BTRFS_TREE_BLOCK_REF_KEY:
@@ -4343,7 +4343,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 		head = rb_entry(node, struct btrfs_delayed_ref_head,
 				href_node);
 		if (!mutex_trylock(&head->mutex)) {
-			atomic_inc(&head->node.refs);
+			refcount_inc(&head->node.refs);
 			spin_unlock(&delayed_refs->lock);

 			mutex_lock(&head->mutex);
@@ -892,7 +892,7 @@ search_again:
 	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 	if (head) {
 		if (!mutex_trylock(&head->mutex)) {
-			atomic_inc(&head->node.refs);
+			refcount_inc(&head->node.refs);
 			spin_unlock(&delayed_refs->lock);

 			btrfs_release_path(path);
@@ -2980,7 +2980,7 @@ again:
 		struct btrfs_delayed_ref_node *ref;

 		ref = &head->node;
-		atomic_inc(&ref->refs);
+		refcount_inc(&ref->refs);

 		spin_unlock(&delayed_refs->lock);
 		/*
@@ -3057,7 +3057,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
 	}

 	if (!mutex_trylock(&head->mutex)) {
-		atomic_inc(&head->node.refs);
+		refcount_inc(&head->node.refs);
 		spin_unlock(&delayed_refs->lock);

 		btrfs_release_path(path);