fs, nfs: convert nfs4_lock_state.ls_count from atomic_t to refcount_t
atomic_t variables are currently used to implement reference counters
with the following properties:

 - counter is initialized to 1 using atomic_set()
 - a resource is freed upon counter reaching zero
 - once counter reaches zero, its further increments aren't allowed
 - counter schema uses basic atomic operations
   (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to the newly provided
refcount_t type and API, which prevents accidental counter overflows
and underflows. This is important since overflows and underflows can
lead to use-after-free situations and be exploitable.

The variable nfs4_lock_state.ls_count is used as a pure reference
counter. Convert it to refcount_t and fix up the operations.

Suggested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 194bc1f481
parent 0896cade12
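As a minimal standalone sketch of the counter schema the commit message
describes (not part of this patch; struct foo and the foo_* helpers are
hypothetical names), the refcount_t API from <linux/refcount.h> maps
onto that schema roughly as follows:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
        refcount_t ref;                 /* initialized to 1, object freed when it reaches zero */
};

static struct foo *foo_alloc(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (f)
                refcount_set(&f->ref, 1);       /* counter starts at 1 */
        return f;
}

static struct foo *foo_get(struct foo *f)
{
        refcount_inc(&f->ref);          /* saturates and WARNs instead of wrapping */
        return f;
}

static void foo_put(struct foo *f)
{
        if (refcount_dec_and_test(&f->ref))     /* last reference dropped */
                kfree(f);
}

Unlike atomic_inc()/atomic_dec_and_test(), the refcount_* operations
saturate at a maximum value and warn on increment-from-zero and on
underflow, which is what closes off the overflow/underflow-driven
use-after-free mentioned above.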
@@ -144,7 +144,7 @@ struct nfs4_lock_state {
 	unsigned long		ls_flags;
 	struct nfs_seqid_counter	ls_seqid;
 	nfs4_stateid		ls_stateid;
-	atomic_t		ls_count;
+	refcount_t		ls_count;
 	fl_owner_t		ls_owner;
 };
@@ -2562,7 +2562,7 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
 		if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
 			struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
 
-			atomic_inc(&lsp->ls_count);
+			refcount_inc(&lsp->ls_count);
 			spin_unlock(&state->state_lock);
 
 			nfs4_put_lock_state(prev);
@@ -5923,7 +5923,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
 	p->arg.seqid = seqid;
 	p->res.seqid = seqid;
 	p->lsp = lsp;
-	atomic_inc(&lsp->ls_count);
+	refcount_inc(&lsp->ls_count);
 	/* Ensure we don't close file until we're done freeing locks! */
 	p->ctx = get_nfs_open_context(ctx);
 	p->l_ctx = nfs_get_lock_context(ctx);
@@ -6139,7 +6139,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
 	p->res.lock_seqid = p->arg.lock_seqid;
 	p->lsp = lsp;
 	p->server = server;
-	atomic_inc(&lsp->ls_count);
+	refcount_inc(&lsp->ls_count);
 	p->ctx = get_nfs_open_context(ctx);
 	memcpy(&p->fl, fl, sizeof(p->fl));
 	return p;
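In the two allocation paths above the caller already holds a reference
to lsp, so the count cannot be zero and plain refcount_inc() is a direct
replacement for atomic_inc(). A hedged sketch of that pattern (struct
lockdata_example and example_take_lock_state are illustrative names, not
from the patch; struct nfs4_lock_state is as declared in the header hunk
above):

struct lockdata_example {
        struct nfs4_lock_state *lsp;
};

static void example_take_lock_state(struct lockdata_example *p,
                                    struct nfs4_lock_state *lsp)
{
        /* The caller's own reference keeps lsp alive across this call. */
        p->lsp = lsp;
        refcount_inc(&lsp->ls_count);
}

If the object could already have dropped to zero (for instance a lookup
racing with the final put), refcount_inc_not_zero() would be needed
instead; these paths do not have that race because the caller's
reference pins the object.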
@@ -825,7 +825,7 @@ __nfs4_find_lock_state(struct nfs4_state *state,
 			ret = pos;
 	}
 	if (ret)
-		atomic_inc(&ret->ls_count);
+		refcount_inc(&ret->ls_count);
 	return ret;
 }
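__nfs4_find_lock_state() runs with state->state_lock held, so the new
reference is taken under the same lock that the final put acquires via
refcount_dec_and_lock(): an object still on the list cannot reach zero
and be freed between the lookup and the refcount_inc(). A rough sketch
of that lookup-under-lock shape (find_locked() is an illustrative name,
not the function in this file):

static struct nfs4_lock_state *find_locked(struct nfs4_state *state,
                                           fl_owner_t owner)
{
        struct nfs4_lock_state *lsp, *ret = NULL;

        spin_lock(&state->state_lock);
        list_for_each_entry(lsp, &state->lock_states, ls_locks) {
                if (lsp->ls_owner == owner) {
                        ret = lsp;
                        refcount_inc(&ret->ls_count);   /* safe: state_lock held */
                        break;
                }
        }
        spin_unlock(&state->state_lock);
        return ret;
}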
@@ -843,7 +843,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 	if (lsp == NULL)
 		return NULL;
 	nfs4_init_seqid_counter(&lsp->ls_seqid);
-	atomic_set(&lsp->ls_count, 1);
+	refcount_set(&lsp->ls_count, 1);
 	lsp->ls_state = state;
 	lsp->ls_owner = fl_owner;
 	lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
@@ -907,7 +907,7 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 	if (lsp == NULL)
 		return;
 	state = lsp->ls_state;
-	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
+	if (!refcount_dec_and_lock(&lsp->ls_count, &state->state_lock))
 		return;
 	list_del(&lsp->ls_locks);
 	if (list_empty(&state->lock_states))
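nfs4_put_lock_state() has to unlink the lock state from
state->lock_states under state->state_lock when the last reference goes
away, so the decrement and the lock acquisition must happen as one
atomic step. refcount_dec_and_lock() is the refcount_t counterpart of
atomic_dec_and_lock(): it returns true only when the count dropped to
zero, and in that case it returns with the spinlock held. A generic
sketch of the pattern (struct obj and obj_put are hypothetical, not the
NFS code itself):

struct obj {
        refcount_t ref;
        struct list_head node;
};

static void obj_put(struct obj *o, spinlock_t *lock)
{
        /* Only the final put takes the lock; it is held when true is returned. */
        if (!refcount_dec_and_lock(&o->ref, lock))
                return;
        list_del(&o->node);
        spin_unlock(lock);
        kfree(o);
}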
@@ -927,7 +927,7 @@ static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
 	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
 
 	dst->fl_u.nfs4_fl.owner = lsp;
-	atomic_inc(&lsp->ls_count);
+	refcount_inc(&lsp->ls_count);
 }
 
 static void nfs4_fl_release_lock(struct file_lock *fl)