NFS client bugfixes for Linux 4.20
Highlights include:

Stable fixes:
- Don't exit the NFSv4 state manager without clearing NFS4CLNT_MANAGER_RUNNING

Bugfixes:
- Fix an Oops when destroying the RPCSEC_GSS credential cache
- Fix an Oops during delegation callbacks
- Ensure that the NFSv4 state manager exits the loop on SIGKILL
- Fix a bogus get/put in generic_key_to_expire()

-----BEGIN PGP SIGNATURE-----
iQIcBAABAgAGBQJb7Lh0AAoJEA4mA3inWBJc8uAQAIkrGChs3AFuEQ3G3H9RlxDX
WFsPghRGmDwXf2sD+nWjl0r60v0v5fQaUhW/7EPe2kbVTF/rnjieXNeFOw33ZMFk
MDq03nL1/I25DoNK/qg5GZ2NIltZ9oKKbwaN+0LxXKz69X5qIXYnDzYPHDR/PNTg
Go7PvG8rU31Wd67E2pquwC6zZ6rCPf2BtQjZdzouLAEUWXAMHyJmszpFUxhLMJoz
k6dZouphj8fkMse3cfKLnGDqbQ2bE6+Yb0B6Hi0p5nShYgZTaQNZ9KxrEJF7J05i
cxH6IvLEawEMWXYzGEwr1LUDDrpwveuNTt/OroTgOcSsVpZx1DE0sOZkQ4pt/uTe
c5NzZYKjEOb2DWxoGR2GEDkRasKVBkWvR5MegvyDgyAcXkAjN/6CgYXiniNYDxl6
qk7sIqkJfug7fv+VW5YHwORKnvRIEDlFcwy5yZ0ij/Qa0dqUR3aczINGLwS6kcfn
u7M42UR17FUo2zaI9pZhuijwntbtkXMIETWHGRctK7Mum6u37QSVySNCO2A4knBE
jEy+oYPFCIUqH+ESpNp73otrVt1CTexScIJNsEi1naLmOhjQRW7YjUPEH1Xjg0Ss
OGyqIjOf6ToF6ma39/XZI9miJe08k6x8b0aGUdG29Cko9UvjLH86ODEausSRAyFA
OyZFFuHHAau5FGpNvZfj
=AstN
-----END PGP SIGNATURE-----

Merge tag 'nfs-for-4.20-3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client bugfixes from Trond Myklebust:
 "Highlights include:

  Stable fixes:
   - Don't exit the NFSv4 state manager without clearing
     NFS4CLNT_MANAGER_RUNNING

  Bugfixes:
   - Fix an Oops when destroying the RPCSEC_GSS credential cache
   - Fix an Oops during delegation callbacks
   - Ensure that the NFSv4 state manager exits the loop on SIGKILL
   - Fix a bogus get/put in generic_key_to_expire()"

* tag 'nfs-for-4.20-3' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFSv4: Fix an Oops during delegation callbacks
  SUNRPC: Fix a bogus get/put in generic_key_to_expire()
  SUNRPC: Fix a Oops when destroying the RPCSEC_GSS credential cache
  NFSv4: Ensure that the state manager exits the loop on SIGKILL
  NFSv4: Don't exit the state manager without clearing NFS4CLNT_MANAGER_RUNNING
commit 94ca5c18e1
@@ -66,7 +66,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
 out_iput:
         rcu_read_unlock();
         trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
-        iput(inode);
+        nfs_iput_and_deactive(inode);
 out:
         dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
         return res->status;
@@ -108,7 +108,7 @@ __be32 nfs4_callback_recall(void *argp, void *resp,
         }
         trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
                         &args->stateid, -ntohl(res));
-        iput(inode);
+        nfs_iput_and_deactive(inode);
 out:
         dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
         return res;
@@ -850,16 +850,23 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
                                  const struct nfs_fh *fhandle)
 {
         struct nfs_delegation *delegation;
-        struct inode *res = NULL;
+        struct inode *freeme, *res = NULL;
 
         list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                 spin_lock(&delegation->lock);
                 if (delegation->inode != NULL &&
                     nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
-                        res = igrab(delegation->inode);
+                        freeme = igrab(delegation->inode);
+                        if (freeme && nfs_sb_active(freeme->i_sb))
+                                res = freeme;
                         spin_unlock(&delegation->lock);
                         if (res != NULL)
                                 return res;
+                        if (freeme) {
+                                rcu_read_unlock();
+                                iput(freeme);
+                                rcu_read_lock();
+                        }
                         return ERR_PTR(-EAGAIN);
                 }
                 spin_unlock(&delegation->lock);
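
A note on the pattern: this hunk and the two nfs_iput_and_deactive() conversions in the callback handlers above act as a pair. The delegation lookup now pins the superblock with nfs_sb_active() in addition to taking the inode reference with igrab(), and on the failure path it drops the RCU read lock around iput(), since dropping the last inode reference may sleep; nfs_iput_and_deactive() is the matching helper that releases both references on the callback side. The sketch below is an illustration-only userspace analogue of the two-level acquire/release idea; every name in it is invented for the example and it is not the kernel implementation.

/*
 * Illustration only, not kernel code (invented names): a lookup pins both
 * the object and its container, and one helper releases both, mirroring
 * igrab()+nfs_sb_active() on the lookup side and nfs_iput_and_deactive()
 * on the release side.  Build with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct container {
        atomic_int refs;
};

struct item {
        atomic_int refs;
        struct container *parent;
        int id;
};

/* Take a reference only if the count has not already dropped to zero. */
static bool refs_inc_not_zero(atomic_int *refs)
{
        int old = atomic_load(refs);

        while (old != 0) {
                if (atomic_compare_exchange_weak(refs, &old, old + 1))
                        return true;
        }
        return false;
}

static void container_put(struct container *c)
{
        if (atomic_fetch_sub(&c->refs, 1) == 1)
                free(c);
}

static void item_put(struct item *it)
{
        if (atomic_fetch_sub(&it->refs, 1) == 1)
                free(it);
}

/* Lookup side: pin the item, then its container; back out if either fails. */
static struct item *item_grab_active(struct item *it)
{
        if (!refs_inc_not_zero(&it->refs))
                return NULL;
        if (!refs_inc_not_zero(&it->parent->refs)) {
                item_put(it);
                return NULL;
        }
        return it;
}

/* Release side: drop both references taken by item_grab_active(). */
static void item_put_and_deactivate(struct item *it)
{
        struct container *parent = it->parent;

        item_put(it);
        container_put(parent);
}

int main(void)
{
        struct container *c = malloc(sizeof(*c));
        struct item *it = malloc(sizeof(*it));

        atomic_init(&c->refs, 1);
        atomic_init(&it->refs, 1);
        it->parent = c;
        it->id = 1;

        struct item *ref = item_grab_active(it);
        if (ref) {
                printf("using item %d\n", ref->id);
                item_put_and_deactivate(ref);
        }

        item_put(it);           /* drop the initial item reference */
        container_put(c);       /* drop the initial container reference */
        return 0;
}
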
@@ -2601,11 +2601,12 @@ static void nfs4_state_manager(struct nfs_client *clp)
                 nfs4_clear_state_manager_bit(clp);
                 /* Did we race with an attempt to give us more work? */
                 if (clp->cl_state == 0)
-                        break;
+                        return;
                 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
-                        break;
-        } while (refcount_read(&clp->cl_count) > 1);
-        return;
+                        return;
+        } while (refcount_read(&clp->cl_count) > 1 && !signalled());
+        goto out_drain;
 
 out_error:
         if (strlen(section))
                 section_sep = ": ";
@@ -2613,6 +2614,7 @@ out_error:
                         " with error %d\n", section_sep, section,
                         clp->cl_hostname, -status);
         ssleep(1);
+out_drain:
         nfs4_end_drain_session(clp);
         nfs4_clear_state_manager_bit(clp);
 }
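
These two hunks change every exit path of the state manager so that NFS4CLNT_MANAGER_RUNNING is never left set: the in-loop exits return only when the RUNNING bit has already been cleared or has been claimed by another caller, while the loop-condition exit, which now also stops on a fatal signal via signalled(), jumps to the new out_drain label to drain the session and clear the bit. The sketch below is an illustration-only userspace analogue of the underlying handoff (clear the RUNNING flag, then re-check for work that raced in); all names are invented for the example.

/*
 * Illustration only, not kernel code (invented names): a single manager
 * thread owns a RUNNING flag; before exiting it clears the flag and then
 * re-checks for work, so a request that raced in is picked up either by
 * this thread or by the requester starting a new one.
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool manager_running;
static atomic_int pending_work;

static void *manager(void *arg)
{
        (void)arg;
        do {
                /* drain whatever work is queued */
                int n;

                while ((n = atomic_exchange(&pending_work, 0)) > 0)
                        printf("manager: handled %d item(s)\n", n);

                /* hand off: clear RUNNING, then re-check for a race */
                atomic_store(&manager_running, false);
                if (atomic_load(&pending_work) == 0)
                        return NULL;
                /* work raced in; try to become the manager again */
        } while (!atomic_exchange(&manager_running, true));
        /* someone else claimed RUNNING; they will process the new work */
        return NULL;
}

static void schedule_state_work(void)
{
        atomic_fetch_add(&pending_work, 1);
        /* only start a manager if none is running */
        if (!atomic_exchange(&manager_running, true)) {
                pthread_t t;

                if (pthread_create(&t, NULL, manager, NULL) == 0)
                        pthread_detach(t);
        }
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                schedule_state_work();
                usleep(1000);
        }
        sleep(1);       /* crude: let the detached manager finish */
        return 0;
}
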
@@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
 {
         struct auth_cred *acred = &container_of(cred, struct generic_cred,
                                                 gc_base)->acred;
-        bool ret;
-
-        get_rpccred(cred);
-        ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
-        put_rpccred(cred);
-
-        return ret;
+        return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
 }
 
 static const struct rpc_credops generic_credops = {
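
The deleted get_rpccred()/put_rpccred() pair is the "bogus get/put" from the summary above: whoever calls generic_key_to_expire() already holds a reference to the cred, so bumping and dropping the count around a simple flag test gains nothing and only adds another spot where reference counting can go wrong. Below is an illustration-only userspace sketch of the same idea; the names are invented for the example.

/*
 * Illustration only, not kernel code (invented names): a read-only predicate
 * on a refcounted object relies on the reference its caller already holds,
 * as the fix above does, instead of bumping and dropping the count itself.
 * Build with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_KEY_EXPIRE_SOON (1u << 0)

struct cred {
        atomic_int refs;
        atomic_uint flags;
};

/* Correct: the caller's reference keeps 'c' alive for the duration. */
static bool cred_key_to_expire(struct cred *c)
{
        return atomic_load(&c->flags) & FLAG_KEY_EXPIRE_SOON;
}

static void cred_put(struct cred *c)
{
        if (atomic_fetch_sub(&c->refs, 1) == 1)
                free(c);
}

int main(void)
{
        struct cred *c = malloc(sizeof(*c));

        atomic_init(&c->refs, 1);               /* caller's reference */
        atomic_init(&c->flags, FLAG_KEY_EXPIRE_SOON);

        printf("key to expire: %s\n", cred_key_to_expire(c) ? "yes" : "no");
        cred_put(c);
        return 0;
}
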
@@ -1239,36 +1239,59 @@ gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
         return &gss_auth->rpc_auth;
 }
 
+static struct gss_cred *
+gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
+{
+        struct gss_cred *new;
+
+        /* Make a copy of the cred so that we can reference count it */
+        new = kzalloc(sizeof(*gss_cred), GFP_NOIO);
+        if (new) {
+                struct auth_cred acred = {
+                        .uid = gss_cred->gc_base.cr_uid,
+                };
+                struct gss_cl_ctx *ctx =
+                        rcu_dereference_protected(gss_cred->gc_ctx, 1);
+
+                rpcauth_init_cred(&new->gc_base, &acred,
+                                &gss_auth->rpc_auth,
+                                &gss_nullops);
+                new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
+                new->gc_service = gss_cred->gc_service;
+                new->gc_principal = gss_cred->gc_principal;
+                kref_get(&gss_auth->kref);
+                rcu_assign_pointer(new->gc_ctx, ctx);
+                gss_get_ctx(ctx);
+        }
+        return new;
+}
+
 /*
- * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
+ * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
  * to the server with the GSS control procedure field set to
  * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
  * all RPCSEC_GSS state associated with that context.
  */
-static int
-gss_destroying_context(struct rpc_cred *cred)
+static void
+gss_send_destroy_context(struct rpc_cred *cred)
 {
         struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
         struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
         struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
+        struct gss_cred *new;
         struct rpc_task *task;
 
-        if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
-                return 0;
-
-        ctx->gc_proc = RPC_GSS_PROC_DESTROY;
-        cred->cr_ops = &gss_nullops;
-
-        /* Take a reference to ensure the cred will be destroyed either
-         * by the RPC call or by the put_rpccred() below */
-        get_rpccred(cred);
-
-        task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
-        if (!IS_ERR(task))
-                rpc_put_task(task);
-
-        put_rpccred(cred);
-        return 1;
+        new = gss_dup_cred(gss_auth, gss_cred);
+        if (new) {
+                ctx->gc_proc = RPC_GSS_PROC_DESTROY;
+                task = rpc_call_null(gss_auth->client, &new->gc_base,
+                                RPC_TASK_ASYNC|RPC_TASK_SOFT);
+                if (!IS_ERR(task))
+                        rpc_put_task(task);
+                put_rpccred(&new->gc_base);
+        }
 }
 
 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
@@ -1330,8 +1353,8 @@ static void
 gss_destroy_cred(struct rpc_cred *cred)
 {
 
-        if (gss_destroying_context(cred))
-                return;
+        if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
+                gss_send_destroy_context(cred);
         gss_destroy_nullcred(cred);
 }
 
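
The reworked teardown path no longer resurrects a credential whose last reference is being dropped: gss_dup_cred() builds a private copy that shares the GSS context, and the asynchronous RPC_GSS_PROC_DESTROY call is issued against that copy, so gss_destroy_cred() can release the original immediately once the UPTODATE bit has been cleared. Below is an illustration-only userspace sketch of that ownership transfer (hand an async task its own copy of the state it needs); all names are invented for the example and it is not the kernel implementation.

/*
 * Illustration only, not kernel code (invented names): the teardown path
 * hands an asynchronous task its own private copy of the state it needs
 * instead of resurrecting an object whose last reference is being dropped.
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

struct ctx {
        char principal[32];
};

/* The async "destroy" notification owns its argument and frees it. */
static void *send_destroy_async(void *arg)
{
        struct ctx *copy = arg;

        printf("notifying server: destroy context for %s\n", copy->principal);
        free(copy);
        return NULL;
}

static void destroy_ctx(struct ctx *c)
{
        /* Duplicate what the async task needs, then free the original now. */
        struct ctx *copy = malloc(sizeof(*copy));
        pthread_t t;

        if (!copy) {
                free(c);
                return;
        }
        memcpy(copy, c, sizeof(*copy));
        free(c);

        if (pthread_create(&t, NULL, send_destroy_async, copy) == 0)
                pthread_detach(t);
        else
                free(copy);
}

int main(void)
{
        struct ctx *c = malloc(sizeof(*c));

        snprintf(c->principal, sizeof(c->principal), "nfs/client.example");
        destroy_ctx(c);
        sleep(1);       /* crude: let the detached task run before exit */
        return 0;
}
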