Merge branch 'nfs-for-2.6.36' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6
* 'nfs-for-2.6.36' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (42 commits)
  NFS: NFSv4.1 is no longer a "developer only" feature
  NFS: NFS_V4 is no longer an EXPERIMENTAL feature
  NFS: Fix /proc/mount for legacy binary interface
  NFS: Fix the locking in nfs4_callback_getattr
  SUNRPC: Defer deleting the security context until gss_do_free_ctx()
  SUNRPC: prevent task_cleanup running on freed xprt
  SUNRPC: Reduce asynchronous RPC task stack usage
  SUNRPC: Move the bound cred to struct rpc_rqst
  SUNRPC: Clean up of rpc_bindcred()
  SUNRPC: Move remaining RPC client related task initialisation into clnt.c
  SUNRPC: Ensure that rpc_exit() always wakes up a sleeping task
  SUNRPC: Make the credential cache hashtable size configurable
  SUNRPC: Store the hashtable size in struct rpc_cred_cache
  NFS: Ensure the AUTH_UNIX credcache is allocated dynamically
  NFS: Fix the NFS users of rpc_restart_call()
  SUNRPC: The function rpc_restart_call() should return success/failure
  NFSv4: Get rid of the bogus RPC_ASSASSINATED(task) checks
  NFSv4: Clean up the process of renewing the NFSv4 lease
  NFSv4.1: Handle NFS4ERR_DELAY on SEQUENCE correctly
  NFS: nfs_rename() should not have to flush out writebacks
  ...
commit 5df6b8e65a
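One recurring change in this merge is that rpc_restart_call() and rpc_restart_call_prepare() now report whether the restart was actually queued, and the nfs_restart_rpc() helper in fs/nfs/internal.h propagates that result (see the internal.h and nfs4proc.c hunks below). A minimal sketch of the calling pattern this enables follows; the callback name and its surrounding logic are illustrative only and are not part of this merge:

/*
 * Hypothetical completion callback (illustrative only): only queue a
 * delay-and-retry when the restart request was actually accepted,
 * mirroring the NFS4ERR_DELAY handling in the nfs4proc.c hunks below.
 */
static int example_rpc_call_done(struct rpc_task *task, struct nfs_client *clp)
{
	if (task->tk_status != -NFS4ERR_DELAY)
		return 1;			/* finished; no restart needed */
	if (!nfs_restart_rpc(task, clp))	/* restart now reports success/failure */
		return 1;			/* restart was refused; give up */
	rpc_delay(task, NFS4_POLL_RETRY_MAX);	/* wait before the retransmission */
	return 0;				/* task will run again */
}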
@@ -61,8 +61,8 @@ config NFS_V3_ACL
 If unsure, say N.
 
 config NFS_V4
-bool "NFS client support for NFS version 4 (EXPERIMENTAL)"
-depends on NFS_FS && EXPERIMENTAL
+bool "NFS client support for NFS version 4"
+depends on NFS_FS
 select RPCSEC_GSS_KRB5
 help
 This option enables support for version 4 of the NFS protocol
@@ -72,16 +72,16 @@ config NFS_V4
 space programs which can be found in the Linux nfs-utils package,
 available from http://linux-nfs.org/.
 
-If unsure, say N.
+If unsure, say Y.
 
 config NFS_V4_1
-bool "NFS client support for NFSv4.1 (DEVELOPER ONLY)"
+bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
 depends on NFS_V4 && EXPERIMENTAL
 help
 This option enables support for minor version 1 of the NFSv4 protocol
 (draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
 
-Unless you're an NFS developer, say N.
+If unsure, say N.
 
 config ROOT_NFS
 bool "Root file system on NFS"
@@ -37,8 +37,8 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
 if (inode == NULL)
 goto out_putclient;
 nfsi = NFS_I(inode);
-down_read(&nfsi->rwsem);
-delegation = nfsi->delegation;
+rcu_read_lock();
+delegation = rcu_dereference(nfsi->delegation);
 if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
 goto out_iput;
 res->size = i_size_read(inode);
@@ -53,7 +53,7 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
 args->bitmap[1];
 res->status = 0;
 out_iput:
-up_read(&nfsi->rwsem);
+rcu_read_unlock();
 iput(inode);
 out_putclient:
 nfs_put_client(clp);
@@ -62,16 +62,6 @@ out:
 return res->status;
 }
 
-static int (*nfs_validate_delegation_stateid(struct nfs_client *clp))(struct nfs_delegation *, const nfs4_stateid *)
-{
-#if defined(CONFIG_NFS_V4_1)
-if (clp->cl_minorversion > 0)
-return nfs41_validate_delegation_stateid;
-#endif
-return nfs4_validate_delegation_stateid;
-}
-
-
 __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
 {
 struct nfs_client *clp;
@@ -92,8 +82,7 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
 inode = nfs_delegation_find_inode(clp, &args->fh);
 if (inode != NULL) {
 /* Set up a helper thread to actually return the delegation */
-switch (nfs_async_inode_return_delegation(inode, &args->stateid,
-nfs_validate_delegation_stateid(clp))) {
+switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
 case 0:
 res = 0;
 break;
@@ -150,6 +150,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
 clp->cl_boot_time = CURRENT_TIME;
 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
 clp->cl_minorversion = cl_init->minorversion;
+clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
 #endif
 cred = rpc_lookup_machine_cred();
 if (!IS_ERR(cred))
@@ -178,7 +179,7 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp)
 clp->cl_session = NULL;
 }
 
-clp->cl_call_sync = _nfs4_call_sync;
+clp->cl_mvops = nfs_v4_minor_ops[0];
 #endif /* CONFIG_NFS_V4_1 */
 }
 
@@ -188,7 +189,7 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp)
 static void nfs4_destroy_callback(struct nfs_client *clp)
 {
 if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
-nfs_callback_down(clp->cl_minorversion);
+nfs_callback_down(clp->cl_mvops->minor_version);
 }
 
 static void nfs4_shutdown_client(struct nfs_client *clp)
@@ -1126,7 +1127,7 @@ static int nfs4_init_callback(struct nfs_client *clp)
 return error;
 }
 
-error = nfs_callback_up(clp->cl_minorversion,
+error = nfs_callback_up(clp->cl_mvops->minor_version,
 clp->cl_rpcclient->cl_xprt);
 if (error < 0) {
 dprintk("%s: failed to start callback. Error = %d\n",
@@ -1143,10 +1144,8 @@ static int nfs4_init_callback(struct nfs_client *clp)
 */
 static int nfs4_init_client_minor_version(struct nfs_client *clp)
 {
-clp->cl_call_sync = _nfs4_call_sync;
-
 #if defined(CONFIG_NFS_V4_1)
-if (clp->cl_minorversion) {
+if (clp->cl_mvops->minor_version) {
 struct nfs4_session *session = NULL;
 /*
 * Create the session and mark it expired.
@@ -1158,7 +1157,13 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
 return -ENOMEM;
 
 clp->cl_session = session;
-clp->cl_call_sync = _nfs4_call_sync_session;
+/*
+ * The create session reply races with the server back
+ * channel probe. Mark the client NFS_CS_SESSION_INITING
+ * so that the client back channel can find the
+ * nfs_client struct
+ */
+clp->cl_cons_state = NFS_CS_SESSION_INITING;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
@@ -1454,7 +1459,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
 data->authflavor,
 parent_server->client->cl_xprt->prot,
 parent_server->client->cl_timeout,
-parent_client->cl_minorversion);
+parent_client->cl_mvops->minor_version);
 if (error < 0)
 goto error;
 
@@ -268,14 +268,6 @@ out:
 return status;
 }
 
-/* Sync all data to disk upon delegation return */
-static void nfs_msync_inode(struct inode *inode)
-{
-filemap_fdatawrite(inode->i_mapping);
-nfs_wb_all(inode);
-filemap_fdatawait(inode->i_mapping);
-}
-
 /*
 * Basic procedure for returning a delegation to the server
 */
@@ -367,7 +359,7 @@ int nfs_inode_return_delegation(struct inode *inode)
 delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
 spin_unlock(&clp->cl_lock);
 if (delegation != NULL) {
-nfs_msync_inode(inode);
+nfs_wb_all(inode);
 err = __nfs_inode_return_delegation(inode, delegation, 1);
 }
 }
@@ -471,9 +463,7 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
 /*
 * Asynchronous delegation recall!
 */
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
-int (*validate_stateid)(struct nfs_delegation *delegation,
-const nfs4_stateid *stateid))
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
 {
 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
 struct nfs_delegation *delegation;
@@ -481,7 +471,7 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
 rcu_read_lock();
 delegation = rcu_dereference(NFS_I(inode)->delegation);
 
-if (!validate_stateid(delegation, stateid)) {
+if (!clp->cl_mvops->validate_stateid(delegation, stateid)) {
 rcu_read_unlock();
 return -ENOENT;
 }
@@ -34,9 +34,7 @@ enum {
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
 int nfs_inode_return_delegation(struct inode *inode);
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
-int (*validate_stateid)(struct nfs_delegation *delegation,
-const nfs4_stateid *stateid));
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
 void nfs_inode_return_delegation_noreclaim(struct inode *inode);
 
 struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
@@ -1652,16 +1652,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 }
 }
 
-/*
- * ... prune child dentries and writebacks if needed.
- */
-if (atomic_read(&old_dentry->d_count) > 1) {
-if (S_ISREG(old_inode->i_mode))
-nfs_wb_all(old_inode);
-shrink_dcache_parent(old_dentry);
-}
 nfs_inode_return_delegation(old_inode);
-
 if (new_inode != NULL)
 nfs_inode_return_delegation(new_inode);
 
@@ -69,6 +69,7 @@ struct nfs_direct_req {
 
 /* I/O parameters */
 struct nfs_open_context *ctx; /* file open context info */
+struct nfs_lock_context *l_ctx; /* Lock context info */
 struct kiocb * iocb; /* controlling i/o request */
 struct inode * inode; /* target file of i/o */
 
@@ -160,6 +161,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 INIT_LIST_HEAD(&dreq->rewrite_list);
 dreq->iocb = NULL;
 dreq->ctx = NULL;
+dreq->l_ctx = NULL;
 spin_lock_init(&dreq->lock);
 atomic_set(&dreq->io_count, 0);
 dreq->count = 0;
@@ -173,6 +175,8 @@ static void nfs_direct_req_free(struct kref *kref)
 {
 struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 
+if (dreq->l_ctx != NULL)
+nfs_put_lock_context(dreq->l_ctx);
 if (dreq->ctx != NULL)
 put_nfs_open_context(dreq->ctx);
 kmem_cache_free(nfs_direct_cachep, dreq);
@@ -336,6 +340,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 data->cred = msg.rpc_cred;
 data->args.fh = NFS_FH(inode);
 data->args.context = ctx;
+data->args.lock_context = dreq->l_ctx;
 data->args.offset = pos;
 data->args.pgbase = pgbase;
 data->args.pages = data->pagevec;
@@ -416,24 +421,28 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 unsigned long nr_segs, loff_t pos)
 {
-ssize_t result = 0;
+ssize_t result = -ENOMEM;
 struct inode *inode = iocb->ki_filp->f_mapping->host;
 struct nfs_direct_req *dreq;
 
 dreq = nfs_direct_req_alloc();
-if (!dreq)
-return -ENOMEM;
+if (dreq == NULL)
+goto out;
 
 dreq->inode = inode;
 dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
+dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
+if (dreq->l_ctx == NULL)
+goto out_release;
 if (!is_sync_kiocb(iocb))
 dreq->iocb = iocb;
 
 result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
 if (!result)
 result = nfs_direct_wait(dreq);
+out_release:
 nfs_direct_req_release(dreq);
+out:
 return result;
 }
 
@@ -574,6 +583,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 data->args.offset = 0;
 data->args.count = 0;
 data->args.context = dreq->ctx;
+data->args.lock_context = dreq->l_ctx;
 data->res.count = 0;
 data->res.fattr = &data->fattr;
 data->res.verf = &data->verf;
@@ -761,6 +771,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 data->cred = msg.rpc_cred;
 data->args.fh = NFS_FH(inode);
 data->args.context = ctx;
+data->args.lock_context = dreq->l_ctx;
 data->args.offset = pos;
 data->args.pgbase = pgbase;
 data->args.pages = data->pagevec;
@@ -845,7 +856,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 unsigned long nr_segs, loff_t pos,
 size_t count)
 {
-ssize_t result = 0;
+ssize_t result = -ENOMEM;
 struct inode *inode = iocb->ki_filp->f_mapping->host;
 struct nfs_direct_req *dreq;
 size_t wsize = NFS_SERVER(inode)->wsize;
@@ -853,7 +864,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
 dreq = nfs_direct_req_alloc();
 if (!dreq)
-return -ENOMEM;
+goto out;
 nfs_alloc_commit_data(dreq);
 
 if (dreq->commit_data == NULL || count < wsize)
@@ -861,14 +872,18 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
 dreq->inode = inode;
 dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
+dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
+if (dreq->l_ctx != NULL)
+goto out_release;
 if (!is_sync_kiocb(iocb))
 dreq->iocb = iocb;
 
 result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
 if (!result)
 result = nfs_direct_wait(dreq);
+out_release:
 nfs_direct_req_release(dreq);
+out:
 return result;
 }
 
@@ -202,38 +202,12 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
 return loff;
 }
 
-/*
- * Helper for nfs_file_flush() and nfs_file_fsync()
- *
- * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
- * disk, but it retrieves and clears ctx->error after synching, despite
- * the two being set at the same time in nfs_context_set_write_error().
- * This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occured, and hence cause it to
- * fall back to doing a synchronous write.
- */
-static int nfs_do_fsync(struct nfs_open_context *ctx, struct inode *inode)
-{
-int have_error, status;
-int ret = 0;
-
-have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
-status = nfs_wb_all(inode);
-have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
-if (have_error)
-ret = xchg(&ctx->error, 0);
-if (!ret)
-ret = status;
-return ret;
-}
-
 /*
 * Flush all dirty pages, and check for write errors.
 */
 static int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
-struct nfs_open_context *ctx = nfs_file_open_context(file);
 struct dentry *dentry = file->f_path.dentry;
 struct inode *inode = dentry->d_inode;
 
@@ -246,7 +220,7 @@ nfs_file_flush(struct file *file, fl_owner_t id)
 return 0;
 
 /* Flush writes to the server and return any errors */
-return nfs_do_fsync(ctx, inode);
+return vfs_fsync(file, 0);
 }
 
 static ssize_t
@@ -321,6 +295,13 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
+ *
+ * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
+ * disk, but it retrieves and clears ctx->error after synching, despite
+ * the two being set at the same time in nfs_context_set_write_error().
+ * This is because the former is used to notify the _next_ call to
+ * nfs_file_write() that a write error occured, and hence cause it to
+ * fall back to doing a synchronous write.
 */
 static int
 nfs_file_fsync(struct file *file, int datasync)
@@ -328,13 +309,23 @@ nfs_file_fsync(struct file *file, int datasync)
 struct dentry *dentry = file->f_path.dentry;
 struct nfs_open_context *ctx = nfs_file_open_context(file);
 struct inode *inode = dentry->d_inode;
+int have_error, status;
+int ret = 0;
+
 
 dprintk("NFS: fsync file(%s/%s) datasync %d\n",
 dentry->d_parent->d_name.name, dentry->d_name.name,
 datasync);
 
 nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
-return nfs_do_fsync(ctx, inode);
+have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+status = nfs_commit_inode(inode, FLUSH_SYNC);
+have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+if (have_error)
+ret = xchg(&ctx->error, 0);
+if (!ret)
+ret = status;
+return ret;
 }
 
 /*
@@ -648,7 +639,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 
 /* Return error values for O_DSYNC and IS_SYNC() */
 if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
-int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
+int err = vfs_fsync(iocb->ki_filp, 0);
 if (err < 0)
 result = err;
 }
@@ -684,7 +675,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
 written = ret;
 
 if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
-int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
+int err = vfs_fsync(filp, 0);
 if (err < 0)
 ret = err;
 }
 
@@ -413,10 +413,8 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
 return 0;
 
 /* Write all dirty data */
-if (S_ISREG(inode->i_mode)) {
-filemap_write_and_wait(inode->i_mapping);
+if (S_ISREG(inode->i_mode))
 nfs_wb_all(inode);
-}
 
 fattr = nfs_alloc_fattr();
 if (fattr == NULL)
@@ -530,6 +528,68 @@ out:
 return err;
 }
 
+static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
+{
+atomic_set(&l_ctx->count, 1);
+l_ctx->lockowner = current->files;
+l_ctx->pid = current->tgid;
+INIT_LIST_HEAD(&l_ctx->list);
+}
+
+static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
+{
+struct nfs_lock_context *pos;
+
+list_for_each_entry(pos, &ctx->lock_context.list, list) {
+if (pos->lockowner != current->files)
+continue;
+if (pos->pid != current->tgid)
+continue;
+atomic_inc(&pos->count);
+return pos;
+}
+return NULL;
+}
+
+struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
+{
+struct nfs_lock_context *res, *new = NULL;
+struct inode *inode = ctx->path.dentry->d_inode;
+
+spin_lock(&inode->i_lock);
+res = __nfs_find_lock_context(ctx);
+if (res == NULL) {
+spin_unlock(&inode->i_lock);
+new = kmalloc(sizeof(*new), GFP_KERNEL);
+if (new == NULL)
+return NULL;
+nfs_init_lock_context(new);
+spin_lock(&inode->i_lock);
+res = __nfs_find_lock_context(ctx);
+if (res == NULL) {
+list_add_tail(&new->list, &ctx->lock_context.list);
+new->open_context = ctx;
+res = new;
+new = NULL;
+}
+}
+spin_unlock(&inode->i_lock);
+kfree(new);
+return res;
+}
+
+void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
+{
+struct nfs_open_context *ctx = l_ctx->open_context;
+struct inode *inode = ctx->path.dentry->d_inode;
+
+if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
+return;
+list_del(&l_ctx->list);
+spin_unlock(&inode->i_lock);
+kfree(l_ctx);
+}
+
 /**
 * nfs_close_context - Common close_context() routine NFSv2/v3
 * @ctx: pointer to context
@@ -566,11 +626,11 @@ static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct
 path_get(&ctx->path);
 ctx->cred = get_rpccred(cred);
 ctx->state = NULL;
-ctx->lockowner = current->files;
 ctx->flags = 0;
 ctx->error = 0;
 ctx->dir_cookie = 0;
-atomic_set(&ctx->count, 1);
+nfs_init_lock_context(&ctx->lock_context);
+ctx->lock_context.open_context = ctx;
 }
 return ctx;
 }
@@ -578,7 +638,7 @@ static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct
 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
 {
 if (ctx != NULL)
-atomic_inc(&ctx->count);
+atomic_inc(&ctx->lock_context.count);
 return ctx;
 }
 
@@ -586,7 +646,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
 {
 struct inode *inode = ctx->path.dentry->d_inode;
 
-if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
+if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
 return;
 list_del(&ctx->list);
 spin_unlock(&inode->i_lock);
@@ -370,10 +370,9 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
 * Helper for restarting RPC calls in the possible presence of NFSv4.1
 * sessions.
 */
-static inline void nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
+static inline int nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
 {
 if (nfs4_has_session(clp))
-rpc_restart_call_prepare(task);
-else
-rpc_restart_call(task);
+return rpc_restart_call_prepare(task);
+return rpc_restart_call(task);
 }
@@ -233,7 +233,7 @@ nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs
 static int
 nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
 {
-struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+struct rpc_auth *auth = req->rq_cred->cr_auth;
 unsigned int replen;
 u32 offset = (u32)args->offset;
 u32 count = args->count;
@@ -393,8 +393,7 @@ nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *arg
 static int
 nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
 {
-struct rpc_task *task = req->rq_task;
-struct rpc_auth *auth = task->tk_msg.rpc_cred->cr_auth;
+struct rpc_auth *auth = req->rq_cred->cr_auth;
 unsigned int replen;
 u32 count = args->count;
 
@@ -575,7 +574,7 @@ nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
 static int
 nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
 {
-struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+struct rpc_auth *auth = req->rq_cred->cr_auth;
 unsigned int replen;
 
 p = xdr_encode_fhandle(p, args->fh);
@@ -330,7 +330,7 @@ nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *arg
 static int
 nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
 {
-struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+struct rpc_auth *auth = req->rq_cred->cr_auth;
 unsigned int replen;
 u32 count = args->count;
 
@@ -471,7 +471,7 @@ nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args)
 static int
 nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args)
 {
-struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+struct rpc_auth *auth = req->rq_cred->cr_auth;
 unsigned int replen;
 u32 count = args->count;
 
@@ -675,7 +675,7 @@ static int
 nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p,
 struct nfs3_getaclargs *args)
 {
-struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+struct rpc_auth *auth = req->rq_cred->cr_auth;
 unsigned int replen;
 
 p = xdr_encode_fhandle(p, args->fh);
@@ -802,7 +802,7 @@ nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
 static int
 nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args)
 {
-struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+struct rpc_auth *auth = req->rq_cred->cr_auth;
 unsigned int replen;
 
 p = xdr_encode_fhandle(p, args->fh);
@@ -45,10 +45,29 @@ enum nfs4_client_state {
 NFS4CLNT_RECLAIM_NOGRACE,
 NFS4CLNT_DELEGRETURN,
 NFS4CLNT_SESSION_RESET,
-NFS4CLNT_SESSION_DRAINING,
 NFS4CLNT_RECALL_SLOT,
 };
 
+enum nfs4_session_state {
+NFS4_SESSION_INITING,
+NFS4_SESSION_DRAINING,
+};
+
+struct nfs4_minor_version_ops {
+u32 minor_version;
+
+int (*call_sync)(struct nfs_server *server,
+struct rpc_message *msg,
+struct nfs4_sequence_args *args,
+struct nfs4_sequence_res *res,
+int cache_reply);
+int (*validate_stateid)(struct nfs_delegation *,
+const nfs4_stateid *);
+const struct nfs4_state_recovery_ops *reboot_recovery_ops;
+const struct nfs4_state_recovery_ops *nograce_recovery_ops;
+const struct nfs4_state_maintenance_ops *state_renewal_ops;
+};
+
 /*
 * struct rpc_sequence ensures that RPC calls are sent in the exact
 * order that they appear on the list.
@@ -89,7 +108,6 @@ struct nfs_unique_id {
 */
 struct nfs4_state_owner {
 struct nfs_unique_id so_owner_id;
-struct nfs_client *so_client;
 struct nfs_server *so_server;
 struct rb_node so_client_node;
 
@@ -99,7 +117,6 @@ struct nfs4_state_owner {
 atomic_t so_count;
 unsigned long so_flags;
 struct list_head so_states;
-struct list_head so_delegations;
 struct nfs_seqid_counter so_seqid;
 struct rpc_sequence so_sequence;
 };
@@ -125,10 +142,20 @@ enum {
 * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
 */
 
+struct nfs4_lock_owner {
+unsigned int lo_type;
+#define NFS4_ANY_LOCK_TYPE (0U)
+#define NFS4_FLOCK_LOCK_TYPE (1U << 0)
+#define NFS4_POSIX_LOCK_TYPE (1U << 1)
+union {
+fl_owner_t posix_owner;
+pid_t flock_owner;
+} lo_u;
+};
+
 struct nfs4_lock_state {
 struct list_head ls_locks; /* Other lock stateids */
 struct nfs4_state * ls_state; /* Pointer to open state */
-fl_owner_t ls_owner; /* POSIX lock owner */
 #define NFS_LOCK_INITIALIZED 1
 int ls_flags;
 struct nfs_seqid_counter ls_seqid;
@@ -136,6 +163,7 @@ struct nfs4_lock_state {
 struct nfs_unique_id ls_id;
 nfs4_stateid ls_stateid;
 atomic_t ls_count;
+struct nfs4_lock_owner ls_owner;
 };
 
 /* bits for nfs4_state->flags */
@@ -219,11 +247,15 @@ extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nam
 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
 extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
 struct nfs4_fs_locations *fs_locations, struct page *page);
+extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
 
-extern struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[];
-extern struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[];
 #if defined(CONFIG_NFS_V4_1)
-extern int nfs4_setup_sequence(struct nfs_client *clp,
+static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
+{
+return server->nfs_client->cl_session;
+}
+
+extern int nfs4_setup_sequence(const struct nfs_server *server,
 struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
 int cache_reply, struct rpc_task *task);
 extern void nfs4_destroy_session(struct nfs4_session *session);
@@ -234,7 +266,12 @@ extern int nfs4_init_session(struct nfs_server *server);
 extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
 struct nfs_fsinfo *fsinfo);
 #else /* CONFIG_NFS_v4_1 */
-static inline int nfs4_setup_sequence(struct nfs_client *clp,
+static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
+{
+return NULL;
+}
+
+static inline int nfs4_setup_sequence(const struct nfs_server *server,
 struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
 int cache_reply, struct rpc_task *task)
 {
@@ -247,7 +284,7 @@ static inline int nfs4_init_session(struct nfs_server *server)
 }
 #endif /* CONFIG_NFS_V4_1 */
 
-extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
+extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
 
 extern const u32 nfs4_fattr_bitmap[2];
 extern const u32 nfs4_statfs_bitmap[2];
@@ -284,7 +321,7 @@ extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
+extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t);
 
 extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
 extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
@ -303,15 +303,19 @@ do_state_recovery:
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
|
static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
|
||||||
{
|
{
|
||||||
struct nfs_client *clp = server->nfs_client;
|
|
||||||
spin_lock(&clp->cl_lock);
|
spin_lock(&clp->cl_lock);
|
||||||
if (time_before(clp->cl_last_renewal,timestamp))
|
if (time_before(clp->cl_last_renewal,timestamp))
|
||||||
clp->cl_last_renewal = timestamp;
|
clp->cl_last_renewal = timestamp;
|
||||||
spin_unlock(&clp->cl_lock);
|
spin_unlock(&clp->cl_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
|
||||||
|
{
|
||||||
|
do_renew_lease(server->nfs_client, timestamp);
|
||||||
|
}
|
||||||
|
|
||||||
#if defined(CONFIG_NFS_V4_1)
|
#if defined(CONFIG_NFS_V4_1)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -356,7 +360,7 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
|
||||||
{
|
{
|
||||||
struct rpc_task *task;
|
struct rpc_task *task;
|
||||||
|
|
||||||
if (!test_bit(NFS4CLNT_SESSION_DRAINING, &ses->clp->cl_state)) {
|
if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
|
||||||
task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
|
task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
|
||||||
if (task)
|
if (task)
|
||||||
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
|
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
|
||||||
|
@ -370,12 +374,11 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
|
||||||
complete(&ses->complete);
|
complete(&ses->complete);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nfs41_sequence_free_slot(const struct nfs_client *clp,
|
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
|
||||||
struct nfs4_sequence_res *res)
|
|
||||||
{
|
{
|
||||||
struct nfs4_slot_table *tbl;
|
struct nfs4_slot_table *tbl;
|
||||||
|
|
||||||
tbl = &clp->cl_session->fc_slot_table;
|
tbl = &res->sr_session->fc_slot_table;
|
||||||
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
|
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
|
||||||
/* just wake up the next guy waiting since
|
/* just wake up the next guy waiting since
|
||||||
* we may have not consumed a slot after all */
|
* we may have not consumed a slot after all */
|
||||||
|
@ -385,18 +388,17 @@ static void nfs41_sequence_free_slot(const struct nfs_client *clp,
|
||||||
|
|
||||||
spin_lock(&tbl->slot_tbl_lock);
|
spin_lock(&tbl->slot_tbl_lock);
|
||||||
nfs4_free_slot(tbl, res->sr_slotid);
|
nfs4_free_slot(tbl, res->sr_slotid);
|
||||||
nfs41_check_drain_session_complete(clp->cl_session);
|
nfs41_check_drain_session_complete(res->sr_session);
|
||||||
spin_unlock(&tbl->slot_tbl_lock);
|
spin_unlock(&tbl->slot_tbl_lock);
|
||||||
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
|
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void nfs41_sequence_done(struct nfs_client *clp,
|
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
|
||||||
struct nfs4_sequence_res *res,
|
|
||||||
int rpc_status)
|
|
||||||
{
|
{
|
||||||
unsigned long timestamp;
|
unsigned long timestamp;
|
||||||
struct nfs4_slot_table *tbl;
|
struct nfs4_slot_table *tbl;
|
||||||
struct nfs4_slot *slot;
|
struct nfs4_slot *slot;
|
||||||
|
struct nfs_client *clp;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* sr_status remains 1 if an RPC level error occurred. The server
|
* sr_status remains 1 if an RPC level error occurred. The server
|
||||||
|
@ -411,25 +413,51 @@ static void nfs41_sequence_done(struct nfs_client *clp,
|
||||||
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
|
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
tbl = &res->sr_session->fc_slot_table;
|
||||||
|
slot = tbl->slots + res->sr_slotid;
|
||||||
|
|
||||||
/* Check the SEQUENCE operation status */
|
/* Check the SEQUENCE operation status */
|
||||||
if (res->sr_status == 0) {
|
switch (res->sr_status) {
|
||||||
tbl = &clp->cl_session->fc_slot_table;
|
case 0:
|
||||||
slot = tbl->slots + res->sr_slotid;
|
|
||||||
/* Update the slot's sequence and clientid lease timer */
|
/* Update the slot's sequence and clientid lease timer */
|
||||||
++slot->seq_nr;
|
++slot->seq_nr;
|
||||||
timestamp = res->sr_renewal_time;
|
timestamp = res->sr_renewal_time;
|
||||||
spin_lock(&clp->cl_lock);
|
clp = res->sr_session->clp;
|
||||||
if (time_before(clp->cl_last_renewal, timestamp))
|
do_renew_lease(clp, timestamp);
|
||||||
clp->cl_last_renewal = timestamp;
|
|
||||||
spin_unlock(&clp->cl_lock);
|
|
||||||
/* Check sequence flags */
|
/* Check sequence flags */
|
||||||
if (atomic_read(&clp->cl_count) > 1)
|
if (atomic_read(&clp->cl_count) > 1)
|
||||||
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
|
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
|
||||||
|
break;
|
||||||
|
case -NFS4ERR_DELAY:
|
||||||
|
/* The server detected a resend of the RPC call and
|
||||||
|
* returned NFS4ERR_DELAY as per Section 2.10.6.2
|
||||||
|
* of RFC5661.
|
||||||
|
*/
|
||||||
|
dprintk("%s: slot=%d seq=%d: Operation in progress\n",
|
||||||
|
__func__, res->sr_slotid, slot->seq_nr);
|
||||||
|
goto out_retry;
|
||||||
|
default:
|
||||||
|
/* Just update the slot sequence no. */
|
||||||
|
++slot->seq_nr;
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
/* The session may be reset by one of the error handlers. */
|
/* The session may be reset by one of the error handlers. */
|
||||||
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
|
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
|
||||||
nfs41_sequence_free_slot(clp, res);
|
nfs41_sequence_free_slot(res);
|
||||||
|
return 1;
|
||||||
|
out_retry:
|
||||||
|
if (!rpc_restart_call(task))
|
||||||
|
goto out;
|
||||||
|
rpc_delay(task, NFS4_POLL_RETRY_MAX);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int nfs4_sequence_done(struct rpc_task *task,
|
||||||
|
struct nfs4_sequence_res *res)
|
||||||
|
{
|
||||||
|
if (res->sr_session == NULL)
|
||||||
|
return 1;
|
||||||
|
return nfs41_sequence_done(task, res);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -480,12 +508,11 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
|
||||||
if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
|
if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
memset(res, 0, sizeof(*res));
|
|
||||||
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
|
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
|
||||||
tbl = &session->fc_slot_table;
|
tbl = &session->fc_slot_table;
|
||||||
|
|
||||||
spin_lock(&tbl->slot_tbl_lock);
|
spin_lock(&tbl->slot_tbl_lock);
|
||||||
if (test_bit(NFS4CLNT_SESSION_DRAINING, &session->clp->cl_state) &&
|
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
|
||||||
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
|
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
|
||||||
/*
|
/*
|
||||||
* The state manager will wait until the slot table is empty.
|
* The state manager will wait until the slot table is empty.
|
||||||
|
@ -525,6 +552,7 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
|
||||||
res->sr_session = session;
|
res->sr_session = session;
|
||||||
res->sr_slotid = slotid;
|
res->sr_slotid = slotid;
|
||||||
res->sr_renewal_time = jiffies;
|
res->sr_renewal_time = jiffies;
|
||||||
|
res->sr_status_flags = 0;
|
||||||
/*
|
/*
|
||||||
* sr_status is only set in decode_sequence, and so will remain
|
* sr_status is only set in decode_sequence, and so will remain
|
||||||
* set to 1 if an rpc level failure occurs.
|
* set to 1 if an rpc level failure occurs.
|
||||||
|
@ -533,33 +561,33 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int nfs4_setup_sequence(struct nfs_client *clp,
|
int nfs4_setup_sequence(const struct nfs_server *server,
|
||||||
struct nfs4_sequence_args *args,
|
struct nfs4_sequence_args *args,
|
||||||
struct nfs4_sequence_res *res,
|
struct nfs4_sequence_res *res,
|
||||||
int cache_reply,
|
int cache_reply,
|
||||||
struct rpc_task *task)
|
struct rpc_task *task)
|
||||||
{
|
{
|
||||||
|
struct nfs4_session *session = nfs4_get_session(server);
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
dprintk("--> %s clp %p session %p sr_slotid %d\n",
|
if (session == NULL) {
|
||||||
__func__, clp, clp->cl_session, res->sr_slotid);
|
args->sa_session = NULL;
|
||||||
|
res->sr_session = NULL;
|
||||||
if (!nfs4_has_session(clp))
|
|
||||||
goto out;
|
goto out;
|
||||||
ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
|
|
||||||
task);
|
|
||||||
if (ret && ret != -EAGAIN) {
|
|
||||||
/* terminate rpc task */
|
|
||||||
task->tk_status = ret;
|
|
||||||
task->tk_action = NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
dprintk("--> %s clp %p session %p sr_slotid %d\n",
|
||||||
|
__func__, session->clp, session, res->sr_slotid);
|
||||||
|
|
||||||
|
ret = nfs41_setup_sequence(session, args, res, cache_reply,
|
||||||
|
task);
|
||||||
out:
|
out:
|
||||||
dprintk("<-- %s status=%d\n", __func__, ret);
|
dprintk("<-- %s status=%d\n", __func__, ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct nfs41_call_sync_data {
|
struct nfs41_call_sync_data {
|
||||||
struct nfs_client *clp;
|
const struct nfs_server *seq_server;
|
||||||
struct nfs4_sequence_args *seq_args;
|
struct nfs4_sequence_args *seq_args;
|
||||||
 	struct nfs4_sequence_res *seq_res;
 	int cache_reply;

@@ -569,9 +597,9 @@ static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs41_call_sync_data *data = calldata;
 
-	dprintk("--> %s data->clp->cl_session %p\n", __func__,
-		data->clp->cl_session);
+	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
 
-	if (nfs4_setup_sequence(data->clp, data->seq_args,
+	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
 				data->seq_res, data->cache_reply, task))
 		return;
 	rpc_call_start(task);

@@ -587,7 +615,7 @@ static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs41_call_sync_data *data = calldata;
 
-	nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
+	nfs41_sequence_done(task, data->seq_res);
 }
 
 struct rpc_call_ops nfs41_call_sync_ops = {

@@ -600,8 +628,7 @@ struct rpc_call_ops nfs41_call_priv_sync_ops = {
 	.rpc_call_done = nfs41_call_sync_done,
 };
 
-static int nfs4_call_sync_sequence(struct nfs_client *clp,
-				   struct rpc_clnt *clnt,
+static int nfs4_call_sync_sequence(struct nfs_server *server,
 				   struct rpc_message *msg,
 				   struct nfs4_sequence_args *args,
 				   struct nfs4_sequence_res *res,

@@ -611,13 +638,13 @@ static int nfs4_call_sync_sequence(struct nfs_client *clp,
 	int ret;
 	struct rpc_task *task;
 	struct nfs41_call_sync_data data = {
-		.clp = clp,
+		.seq_server = server,
 		.seq_args = args,
 		.seq_res = res,
 		.cache_reply = cache_reply,
 	};
 	struct rpc_task_setup task_setup = {
-		.rpc_client = clnt,
+		.rpc_client = server->client,
 		.rpc_message = msg,
 		.callback_ops = &nfs41_call_sync_ops,
 		.callback_data = &data

@@ -642,10 +669,15 @@ int _nfs4_call_sync_session(struct nfs_server *server,
 			    struct nfs4_sequence_res *res,
 			    int cache_reply)
 {
-	return nfs4_call_sync_sequence(server->nfs_client, server->client,
-				       msg, args, res, cache_reply, 0);
+	return nfs4_call_sync_sequence(server, msg, args, res, cache_reply, 0);
 }
 
+#else
+static int nfs4_sequence_done(struct rpc_task *task,
+			      struct nfs4_sequence_res *res)
+{
+	return 1;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 int _nfs4_call_sync(struct nfs_server *server,

@@ -659,18 +691,9 @@ int _nfs4_call_sync(struct nfs_server *server,
 }
 
 #define nfs4_call_sync(server, msg, args, res, cache_reply) \
-	(server)->nfs_client->cl_call_sync((server), (msg), &(args)->seq_args, \
+	(server)->nfs_client->cl_mvops->call_sync((server), (msg), &(args)->seq_args, \
 			&(res)->seq_res, (cache_reply))
 
-static void nfs4_sequence_done(const struct nfs_server *server,
-			       struct nfs4_sequence_res *res, int rpc_status)
-{
-#ifdef CONFIG_NFS_V4_1
-	if (nfs4_has_session(server->nfs_client))
-		nfs41_sequence_done(server->nfs_client, res, rpc_status);
-#endif /* CONFIG_NFS_V4_1 */
-}
-
 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
 {
 	struct nfs_inode *nfsi = NFS_I(dir);
@@ -745,19 +768,14 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
 	p->o_arg.server = server;
 	p->o_arg.bitmask = server->attr_bitmask;
 	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
-	if (flags & O_EXCL) {
-		if (nfs4_has_persistent_session(server->nfs_client)) {
-			/* GUARDED */
-			p->o_arg.u.attrs = &p->attrs;
-			memcpy(&p->attrs, attrs, sizeof(p->attrs));
-		} else { /* EXCLUSIVE4_1 */
-			u32 *s = (u32 *) p->o_arg.u.verifier.data;
-			s[0] = jiffies;
-			s[1] = current->pid;
-		}
-	} else if (flags & O_CREAT) {
+	if (flags & O_CREAT) {
+		u32 *s;
 		p->o_arg.u.attrs = &p->attrs;
 		memcpy(&p->attrs, attrs, sizeof(p->attrs));
+		s = (u32 *) p->o_arg.u.verifier.data;
+		s[0] = jiffies;
+		s[1] = current->pid;
 	}
 	p->c_arg.fh = &p->o_res.fh;
 	p->c_arg.stateid = &p->o_res.stateid;

@@ -1255,8 +1273,6 @@ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
 	struct nfs4_opendata *data = calldata;
 
 	data->rpc_status = task->tk_status;
-	if (RPC_ASSASSINATED(task))
-		return;
 	if (data->rpc_status == 0) {
 		memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
 				sizeof(data->o_res.stateid.data));
@@ -1356,13 +1372,13 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
 	}
 	/* Update sequence id. */
 	data->o_arg.id = sp->so_owner_id.id;
-	data->o_arg.clientid = sp->so_client->cl_clientid;
+	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
 		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
 	}
 	data->timestamp = jiffies;
-	if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
+	if (nfs4_setup_sequence(data->o_arg.server,
 				&data->o_arg.seq_args,
 				&data->o_res.seq_res, 1, task))
 		return;

@@ -1385,11 +1401,9 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
 
 	data->rpc_status = task->tk_status;
 
-	nfs4_sequence_done(data->o_arg.server, &data->o_res.seq_res,
-			task->tk_status);
-
-	if (RPC_ASSASSINATED(task))
+	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
 		return;
 
 	if (task->tk_status == 0) {
 		switch (data->o_res.f_attr->mode & S_IFMT) {
 			case S_IFREG:
@@ -1773,7 +1787,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
 	if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
 		/* Use that stateid */
 	} else if (state != NULL) {
-		nfs4_copy_stateid(&arg.stateid, state, current->files);
+		nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
 	} else
 		memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
 

@@ -1838,8 +1852,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
 	struct nfs4_state *state = calldata->state;
 	struct nfs_server *server = NFS_SERVER(calldata->inode);
 
-	nfs4_sequence_done(server, &calldata->res.seq_res, task->tk_status);
-	if (RPC_ASSASSINATED(task))
+	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
 		return;
 	/* hmm. we are done with the inode, and in the process of freeing
 	 * the state_owner. we keep this around to process errors

@@ -1903,7 +1916,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
 
 	nfs_fattr_init(calldata->res.fattr);
 	calldata->timestamp = jiffies;
-	if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
+	if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
 				&calldata->arg.seq_args, &calldata->res.seq_res,
 				1, task))
 		return;
@@ -2648,7 +2661,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
 {
 	struct nfs_removeres *res = task->tk_msg.rpc_resp;
 
-	nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
+	if (!nfs4_sequence_done(task, &res->seq_res))
+		return 0;
 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
 		return 0;
 	update_changeattr(dir, &res->cinfo);

@@ -3093,7 +3107,8 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
 
 	dprintk("--> %s\n", __func__);
 
-	nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
+	if (!nfs4_sequence_done(task, &data->res.seq_res))
+		return -EAGAIN;
 
 	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
 		nfs_restart_rpc(task, server->nfs_client);

@@ -3116,8 +3131,8 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
 {
 	struct inode *inode = data->inode;
 
-	nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
-			   task->tk_status);
+	if (!nfs4_sequence_done(task, &data->res.seq_res))
+		return -EAGAIN;
 
 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
 		nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);

@@ -3145,8 +3160,9 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
 {
 	struct inode *inode = data->inode;
 
-	nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
-			   task->tk_status);
+	if (!nfs4_sequence_done(task, &data->res.seq_res))
+		return -EAGAIN;
 
 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
 		nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
 		return -EAGAIN;
@@ -3196,10 +3212,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
 		nfs4_schedule_state_recovery(clp);
 		return;
 	}
-	spin_lock(&clp->cl_lock);
-	if (time_before(clp->cl_last_renewal,timestamp))
-		clp->cl_last_renewal = timestamp;
-	spin_unlock(&clp->cl_lock);
+	do_renew_lease(clp, timestamp);
 }
 
 static const struct rpc_call_ops nfs4_renew_ops = {

@@ -3240,10 +3253,7 @@ int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
 	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
 	if (status < 0)
 		return status;
-	spin_lock(&clp->cl_lock);
-	if (time_before(clp->cl_last_renewal,now))
-		clp->cl_last_renewal = now;
-	spin_unlock(&clp->cl_lock);
+	do_renew_lease(clp, now);
 	return 0;
 }
 
@@ -3464,9 +3474,11 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
 }
 
 static int
-_nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs_client *clp, struct nfs4_state *state)
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
 {
-	if (!clp || task->tk_status >= 0)
+	struct nfs_client *clp = server->nfs_client;
+
+	if (task->tk_status >= 0)
 		return 0;
 	switch(task->tk_status) {
 		case -NFS4ERR_ADMIN_REVOKED:

@@ -3498,8 +3510,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
 			return -EAGAIN;
 #endif /* CONFIG_NFS_V4_1 */
 		case -NFS4ERR_DELAY:
-			if (server)
-				nfs_inc_server_stats(server, NFSIOS_DELAY);
+			nfs_inc_server_stats(server, NFSIOS_DELAY);
 		case -NFS4ERR_GRACE:
 		case -EKEYEXPIRED:
 			rpc_delay(task, NFS4_POLL_RETRY_MAX);

@@ -3520,12 +3531,6 @@ do_state_recovery:
 	return -EAGAIN;
 }
 
-static int
-nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
-{
-	return _nfs4_async_handle_error(task, server, server->nfs_client, state);
-}
-
 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 		unsigned short port, struct rpc_cred *cred,
 		struct nfs4_setclientid_res *res)
@@ -3641,8 +3646,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_delegreturndata *data = calldata;
 
-	nfs4_sequence_done(data->res.server, &data->res.seq_res,
-			task->tk_status);
+	if (!nfs4_sequence_done(task, &data->res.seq_res))
+		return;
 
 	switch (task->tk_status) {
 	case -NFS4ERR_STALE_STATEID:

@@ -3672,7 +3677,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
 
 	d_data = (struct nfs4_delegreturndata *)data;
 
-	if (nfs4_setup_sequence(d_data->res.server->nfs_client,
+	if (nfs4_setup_sequence(d_data->res.server,
 				&d_data->args.seq_args,
 				&d_data->res.seq_res, 1, task))
 		return;

@@ -3892,9 +3897,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
 {
 	struct nfs4_unlockdata *calldata = data;
 
-	nfs4_sequence_done(calldata->server, &calldata->res.seq_res,
-			   task->tk_status);
-	if (RPC_ASSASSINATED(task))
+	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
 		return;
 	switch (task->tk_status) {
 		case 0:

@@ -3927,7 +3930,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
 		return;
 	}
 	calldata->timestamp = jiffies;
-	if (nfs4_setup_sequence(calldata->server->nfs_client,
+	if (nfs4_setup_sequence(calldata->server,
 				&calldata->arg.seq_args,
 				&calldata->res.seq_res, 1, task))
 		return;
@@ -4082,7 +4085,8 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
 	} else
 		data->arg.new_lock_owner = 0;
 	data->timestamp = jiffies;
-	if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args,
+	if (nfs4_setup_sequence(data->server,
+				&data->arg.seq_args,
 				&data->res.seq_res, 1, task))
 		return;
 	rpc_call_start(task);

@@ -4101,12 +4105,10 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 
 	dprintk("%s: begin!\n", __func__);
 
-	nfs4_sequence_done(data->server, &data->res.seq_res,
-			task->tk_status);
+	if (!nfs4_sequence_done(task, &data->res.seq_res))
+		return;
 
 	data->rpc_status = task->tk_status;
-	if (RPC_ASSASSINATED(task))
-		goto out;
 	if (data->arg.new_lock_owner != 0) {
 		if (data->rpc_status == 0)
 			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
@@ -4424,6 +4426,34 @@ out:
 	return err;
 }
 
+static void nfs4_release_lockowner_release(void *calldata)
+{
+	kfree(calldata);
+}
+
+const struct rpc_call_ops nfs4_release_lockowner_ops = {
+	.rpc_release = nfs4_release_lockowner_release,
+};
+
+void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
+{
+	struct nfs_server *server = lsp->ls_state->owner->so_server;
+	struct nfs_release_lockowner_args *args;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
+	};
+
+	if (server->nfs_client->cl_mvops->minor_version != 0)
+		return;
+	args = kmalloc(sizeof(*args), GFP_NOFS);
+	if (!args)
+		return;
+	args->lock_owner.clientid = server->nfs_client->cl_clientid;
+	args->lock_owner.id = lsp->ls_id.id;
+	msg.rpc_argp = args;
+	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
+}
+
 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
 
 int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
@@ -4611,7 +4641,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
 			(struct nfs4_get_lease_time_data *)calldata;
 
 	dprintk("--> %s\n", __func__);
-	nfs41_sequence_done(data->clp, &data->res->lr_seq_res, task->tk_status);
+	if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
+		return;
 	switch (task->tk_status) {
 	case -NFS4ERR_DELAY:
 	case -NFS4ERR_GRACE:

@@ -4805,13 +4836,6 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
 	if (!session)
 		return NULL;
 
-	/*
-	 * The create session reply races with the server back
-	 * channel probe. Mark the client NFS_CS_SESSION_INITING
-	 * so that the client back channel can find the
-	 * nfs_client struct
-	 */
-	clp->cl_cons_state = NFS_CS_SESSION_INITING;
 	init_completion(&session->complete);
 
 	tbl = &session->fc_slot_table;

@@ -4824,6 +4848,8 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
 
+	session->session_state = 1<<NFS4_SESSION_INITING;
+
 	session->clp = clp;
 	return session;
 }

@@ -5040,6 +5066,10 @@ int nfs4_init_session(struct nfs_server *server)
 	if (!nfs4_has_session(clp))
 		return 0;
 
+	session = clp->cl_session;
+	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
+		return 0;
+
 	rsize = server->rsize;
 	if (rsize == 0)
 		rsize = NFS_MAX_FILE_IO_SIZE;

@@ -5047,7 +5077,6 @@ int nfs4_init_session(struct nfs_server *server)
 	if (wsize == 0)
 		wsize = NFS_MAX_FILE_IO_SIZE;
 
-	session = clp->cl_session;
 	session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
 	session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
 

@@ -5060,69 +5089,70 @@ int nfs4_init_session(struct nfs_server *server)
 /*
  * Renew the cl_session lease.
  */
-static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
-{
+struct nfs4_sequence_data {
+	struct nfs_client *clp;
 	struct nfs4_sequence_args args;
 	struct nfs4_sequence_res res;
+};
 
-	struct rpc_message msg = {
-		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
-		.rpc_argp = &args,
-		.rpc_resp = &res,
-		.rpc_cred = cred,
-	};
-
-	args.sa_cache_this = 0;
-
-	return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args,
-				       &res, args.sa_cache_this, 1);
-}
-
 static void nfs41_sequence_release(void *data)
 {
-	struct nfs_client *clp = (struct nfs_client *)data;
+	struct nfs4_sequence_data *calldata = data;
+	struct nfs_client *clp = calldata->clp;
 
 	if (atomic_read(&clp->cl_count) > 1)
 		nfs4_schedule_state_renewal(clp);
 	nfs_put_client(clp);
+	kfree(calldata);
+}
+
+static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
+{
+	switch(task->tk_status) {
+	case -NFS4ERR_DELAY:
+	case -EKEYEXPIRED:
+		rpc_delay(task, NFS4_POLL_RETRY_MAX);
+		return -EAGAIN;
+	default:
+		nfs4_schedule_state_recovery(clp);
+	}
+	return 0;
 }
 
 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
 {
-	struct nfs_client *clp = (struct nfs_client *)data;
+	struct nfs4_sequence_data *calldata = data;
+	struct nfs_client *clp = calldata->clp;
 
-	nfs41_sequence_done(clp, task->tk_msg.rpc_resp, task->tk_status);
+	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
+		return;
 
 	if (task->tk_status < 0) {
 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
 		if (atomic_read(&clp->cl_count) == 1)
 			goto out;
 
-		if (_nfs4_async_handle_error(task, NULL, clp, NULL)
-				== -EAGAIN) {
-			nfs_restart_rpc(task, clp);
+		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
+			rpc_restart_call_prepare(task);
 			return;
 		}
 	}
 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
 out:
-	kfree(task->tk_msg.rpc_argp);
-	kfree(task->tk_msg.rpc_resp);
-
 	dprintk("<-- %s\n", __func__);
 }
 
 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
 {
-	struct nfs_client *clp;
+	struct nfs4_sequence_data *calldata = data;
+	struct nfs_client *clp = calldata->clp;
 	struct nfs4_sequence_args *args;
 	struct nfs4_sequence_res *res;
 
-	clp = (struct nfs_client *)data;
 	args = task->tk_msg.rpc_argp;
 	res = task->tk_msg.rpc_resp;
 
-	if (nfs4_setup_sequence(clp, args, res, 0, task))
+	if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task))
 		return;
 	rpc_call_start(task);
 }
@@ -5133,32 +5163,67 @@ static const struct rpc_call_ops nfs41_sequence_ops = {
 	.rpc_release = nfs41_sequence_release,
 };
 
-static int nfs41_proc_async_sequence(struct nfs_client *clp,
-				     struct rpc_cred *cred)
+static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
 {
-	struct nfs4_sequence_args *args;
-	struct nfs4_sequence_res *res;
+	struct nfs4_sequence_data *calldata;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
 		.rpc_cred = cred,
 	};
+	struct rpc_task_setup task_setup_data = {
+		.rpc_client = clp->cl_rpcclient,
+		.rpc_message = &msg,
+		.callback_ops = &nfs41_sequence_ops,
+		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
+	};
 
 	if (!atomic_inc_not_zero(&clp->cl_count))
-		return -EIO;
-	args = kzalloc(sizeof(*args), GFP_NOFS);
-	res = kzalloc(sizeof(*res), GFP_NOFS);
-	if (!args || !res) {
-		kfree(args);
-		kfree(res);
+		return ERR_PTR(-EIO);
+	calldata = kmalloc(sizeof(*calldata), GFP_NOFS);
+	if (calldata == NULL) {
 		nfs_put_client(clp);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
-	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
-	msg.rpc_argp = args;
-	msg.rpc_resp = res;
+	calldata->res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+	msg.rpc_argp = &calldata->args;
+	msg.rpc_resp = &calldata->res;
+	calldata->clp = clp;
+	task_setup_data.callback_data = calldata;
 
-	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
-			&nfs41_sequence_ops, (void *)clp);
+	return rpc_run_task(&task_setup_data);
+}
+
+static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+	struct rpc_task *task;
+	int ret = 0;
+
+	task = _nfs41_proc_sequence(clp, cred);
+	if (IS_ERR(task))
+		ret = PTR_ERR(task);
+	else
+		rpc_put_task(task);
+	dprintk("<-- %s status=%d\n", __func__, ret);
+	return ret;
+}
+
+static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+	struct rpc_task *task;
+	int ret;
+
+	task = _nfs41_proc_sequence(clp, cred);
+	if (IS_ERR(task)) {
+		ret = PTR_ERR(task);
+		goto out;
+	}
+	ret = rpc_wait_for_completion_task(task);
+	if (!ret)
+		ret = task->tk_status;
+	rpc_put_task(task);
+out:
+	dprintk("<-- %s status=%d\n", __func__, ret);
+	return ret;
 }
 
 struct nfs4_reclaim_complete_data {

@@ -5172,13 +5237,31 @@ static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
 	struct nfs4_reclaim_complete_data *calldata = data;
 
 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
-	if (nfs4_setup_sequence(calldata->clp, &calldata->arg.seq_args,
+	if (nfs41_setup_sequence(calldata->clp->cl_session,
+				&calldata->arg.seq_args,
 				&calldata->res.seq_res, 0, task))
 		return;
 
 	rpc_call_start(task);
 }
 
+static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
+{
+	switch(task->tk_status) {
+	case 0:
+	case -NFS4ERR_COMPLETE_ALREADY:
+	case -NFS4ERR_WRONG_CRED: /* What to do here? */
+		break;
+	case -NFS4ERR_DELAY:
+	case -EKEYEXPIRED:
+		rpc_delay(task, NFS4_POLL_RETRY_MAX);
+		return -EAGAIN;
+	default:
+		nfs4_schedule_state_recovery(clp);
+	}
+	return 0;
+}
+
 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
 {
 	struct nfs4_reclaim_complete_data *calldata = data;
@@ -5186,32 +5269,13 @@ static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
 	struct nfs4_sequence_res *res = &calldata->res.seq_res;
 
 	dprintk("--> %s\n", __func__);
-	nfs41_sequence_done(clp, res, task->tk_status);
-	switch (task->tk_status) {
-	case 0:
-	case -NFS4ERR_COMPLETE_ALREADY:
-		break;
-	case -NFS4ERR_BADSESSION:
-	case -NFS4ERR_DEADSESSION:
-		/*
-		 * Handle the session error, but do not retry the operation, as
-		 * we have no way of telling whether the clientid had to be
-		 * reset before we got our reply. If reset, a new wave of
-		 * reclaim operations will follow, containing their own reclaim
-		 * complete. We don't want our retry to get on the way of
-		 * recovery by incorrectly indicating to the server that we're
-		 * done reclaiming state since the process had to be restarted.
-		 */
-		_nfs4_async_handle_error(task, NULL, clp, NULL);
-		break;
-	default:
-		if (_nfs4_async_handle_error(
-				task, NULL, clp, NULL) == -EAGAIN) {
-			rpc_restart_call_prepare(task);
-			return;
-		}
-	}
+	if (!nfs41_sequence_done(task, res))
+		return;
 
+	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
+		rpc_restart_call_prepare(task);
+		return;
+	}
 	dprintk("<-- %s\n", __func__);
 }
 
@@ -5325,28 +5389,30 @@ struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
 };
 #endif
 
-/*
- * Per minor version reboot and network partition recovery ops
- */
-
-struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[] = {
-	&nfs40_reboot_recovery_ops,
-#if defined(CONFIG_NFS_V4_1)
-	&nfs41_reboot_recovery_ops,
-#endif
+static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
+	.minor_version = 0,
+	.call_sync = _nfs4_call_sync,
+	.validate_stateid = nfs4_validate_delegation_stateid,
+	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
+	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
+	.state_renewal_ops = &nfs40_state_renewal_ops,
 };
 
-struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[] = {
-	&nfs40_nograce_recovery_ops,
 #if defined(CONFIG_NFS_V4_1)
-	&nfs41_nograce_recovery_ops,
-#endif
+static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
+	.minor_version = 1,
+	.call_sync = _nfs4_call_sync_session,
+	.validate_stateid = nfs41_validate_delegation_stateid,
+	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
+	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
+	.state_renewal_ops = &nfs41_state_renewal_ops,
 };
+#endif
 
-struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[] = {
-	&nfs40_state_renewal_ops,
+const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+	[0] = &nfs_v4_0_minor_ops,
 #if defined(CONFIG_NFS_V4_1)
-	&nfs41_state_renewal_ops,
+	[1] = &nfs_v4_1_minor_ops,
 #endif
 };
 
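For orientation only: the nfs_v4_minor_ops[] table introduced above is presumably what gets indexed by the mount's NFSv4 minor version when the nfs_client is initialised. That assignment lives outside this diff (in the client setup code), so the helper below is a hypothetical sketch of the lookup, not the patch's own code; only nfs_v4_minor_ops and cl_mvops come from the diff, the rest is invented for illustration.

static int nfs4_init_minor_ops(struct nfs_client *clp, unsigned int minorversion)
{
	/* Hypothetical: reject minor versions this kernel was not built with.
	 * The [1] slot only exists under CONFIG_NFS_V4_1, so it may be absent. */
	if (minorversion >= ARRAY_SIZE(nfs_v4_minor_ops) ||
	    nfs_v4_minor_ops[minorversion] == NULL)
		return -EPROTONOSUPPORT;
	clp->cl_mvops = nfs_v4_minor_ops[minorversion];
	return 0;
}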
@@ -54,14 +54,14 @@
 void
 nfs4_renew_state(struct work_struct *work)
 {
-	struct nfs4_state_maintenance_ops *ops;
+	const struct nfs4_state_maintenance_ops *ops;
 	struct nfs_client *clp =
 		container_of(work, struct nfs_client, cl_renewd.work);
 	struct rpc_cred *cred;
 	long lease;
 	unsigned long last, now;
 
-	ops = nfs4_state_renewal_ops[clp->cl_minorversion];
+	ops = clp->cl_mvops->state_renewal_ops;
 	dprintk("%s: start\n", __func__);
 	/* Are there any active superblocks? */
 	if (list_empty(&clp->cl_superblocks))
@@ -145,7 +145,9 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
 	struct nfs4_session *ses = clp->cl_session;
 	int max_slots;
 
-	if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
+	if (ses == NULL)
+		return;
+	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
 		spin_lock(&ses->fc_slot_table.slot_tbl_lock);
 		max_slots = ses->fc_slot_table.max_slots;
 		while (max_slots--) {

@@ -167,7 +169,7 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
 	struct nfs4_slot_table *tbl = &ses->fc_slot_table;
 
 	spin_lock(&tbl->slot_tbl_lock);
-	set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
+	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
 	if (tbl->highest_used_slotid != -1) {
 		INIT_COMPLETION(ses->complete);
 		spin_unlock(&tbl->slot_tbl_lock);

@@ -371,7 +373,6 @@ nfs4_alloc_state_owner(void)
 		return NULL;
 	spin_lock_init(&sp->so_lock);
 	INIT_LIST_HEAD(&sp->so_states);
-	INIT_LIST_HEAD(&sp->so_delegations);
 	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
 	sp->so_seqid.sequence = &sp->so_sequence;
 	spin_lock_init(&sp->so_sequence.lock);

@@ -384,7 +385,7 @@ static void
 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 {
 	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
-		struct nfs_client *clp = sp->so_client;
+		struct nfs_client *clp = sp->so_server->nfs_client;
 
 		spin_lock(&clp->cl_lock);
 		rb_erase(&sp->so_client_node, &clp->cl_state_owners);

@@ -406,7 +407,6 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
 	new = nfs4_alloc_state_owner();
 	if (new == NULL)
 		return NULL;
-	new->so_client = clp;
 	new->so_server = server;
 	new->so_cred = cred;
 	spin_lock(&clp->cl_lock);

@@ -423,7 +423,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
 
 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 {
-	struct nfs_client *clp = sp->so_client;
+	struct nfs_client *clp = sp->so_server->nfs_client;
 	struct rpc_cred *cred = sp->so_cred;
 
 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
@@ -602,12 +602,21 @@ void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
  * that is compatible with current->files
  */
 static struct nfs4_lock_state *
-__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
 {
 	struct nfs4_lock_state *pos;
 	list_for_each_entry(pos, &state->lock_states, ls_locks) {
-		if (pos->ls_owner != fl_owner)
+		if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
 			continue;
+		switch (pos->ls_owner.lo_type) {
+		case NFS4_POSIX_LOCK_TYPE:
+			if (pos->ls_owner.lo_u.posix_owner != fl_owner)
+				continue;
+			break;
+		case NFS4_FLOCK_LOCK_TYPE:
+			if (pos->ls_owner.lo_u.flock_owner != fl_pid)
+				continue;
+		}
 		atomic_inc(&pos->ls_count);
 		return pos;
 	}

@@ -619,10 +628,10 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
  * exists, return an uninitialized one.
  *
  */
-static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
 {
 	struct nfs4_lock_state *lsp;
-	struct nfs_client *clp = state->owner->so_client;
+	struct nfs_client *clp = state->owner->so_server->nfs_client;
 
 	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)

@@ -633,7 +642,18 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 	lsp->ls_seqid.sequence = &lsp->ls_sequence;
 	atomic_set(&lsp->ls_count, 1);
 	lsp->ls_state = state;
-	lsp->ls_owner = fl_owner;
+	lsp->ls_owner.lo_type = type;
+	switch (lsp->ls_owner.lo_type) {
+	case NFS4_FLOCK_LOCK_TYPE:
+		lsp->ls_owner.lo_u.flock_owner = fl_pid;
+		break;
+	case NFS4_POSIX_LOCK_TYPE:
+		lsp->ls_owner.lo_u.posix_owner = fl_owner;
+		break;
+	default:
+		kfree(lsp);
+		return NULL;
+	}
 	spin_lock(&clp->cl_lock);
 	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
 	spin_unlock(&clp->cl_lock);
@@ -643,7 +663,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 
 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
 {
-	struct nfs_client *clp = lsp->ls_state->owner->so_client;
+	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
 
 	spin_lock(&clp->cl_lock);
 	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);

@@ -657,13 +677,13 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
  * exists, return an uninitialized one.
  *
 */
-static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
+static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
 {
 	struct nfs4_lock_state *lsp, *new = NULL;
 
 	for(;;) {
 		spin_lock(&state->state_lock);
-		lsp = __nfs4_find_lock_state(state, owner);
+		lsp = __nfs4_find_lock_state(state, owner, pid, type);
 		if (lsp != NULL)
 			break;
 		if (new != NULL) {

@@ -674,7 +694,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
 			break;
 		}
 		spin_unlock(&state->state_lock);
-		new = nfs4_alloc_lock_state(state, owner);
+		new = nfs4_alloc_lock_state(state, owner, pid, type);
 		if (new == NULL)
 			return NULL;
 	}
@@ -701,6 +721,8 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 	if (list_empty(&state->lock_states))
 		clear_bit(LK_STATE_IN_USE, &state->flags);
 	spin_unlock(&state->state_lock);
+	if (lsp->ls_flags & NFS_LOCK_INITIALIZED)
+		nfs4_release_lockowner(lsp);
 	nfs4_free_lock_state(lsp);
 }
 

@@ -728,7 +750,12 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
 
 	if (fl->fl_ops != NULL)
 		return 0;
-	lsp = nfs4_get_lock_state(state, fl->fl_owner);
+	if (fl->fl_flags & FL_POSIX)
+		lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
+	else if (fl->fl_flags & FL_FLOCK)
+		lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE);
+	else
+		return -EINVAL;
 	if (lsp == NULL)
 		return -ENOMEM;
 	fl->fl_u.nfs4_fl.owner = lsp;

@@ -740,7 +767,7 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
  * Byte-range lock aware utility to initialize the stateid of read/write
  * requests.
  */
-void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
+void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
 {
 	struct nfs4_lock_state *lsp;
 	int seq;

@@ -753,7 +780,7 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
 		return;
 
 	spin_lock(&state->state_lock);
-	lsp = __nfs4_find_lock_state(state, fl_owner);
+	lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
 	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
 		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
 	spin_unlock(&state->state_lock);
@@ -1041,11 +1068,11 @@ restart:
 			case -NFS4ERR_BAD_STATEID:
 			case -NFS4ERR_RECLAIM_BAD:
 			case -NFS4ERR_RECLAIM_CONFLICT:
-				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
+				nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
 				break;
 			case -NFS4ERR_EXPIRED:
 			case -NFS4ERR_NO_GRACE:
-				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
+				nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
 			case -NFS4ERR_STALE_CLIENTID:
 			case -NFS4ERR_BADSESSION:
 			case -NFS4ERR_BADSLOT:

@@ -1120,8 +1147,7 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
 	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
 		return;
 
-	nfs4_reclaim_complete(clp,
-		nfs4_reboot_recovery_ops[clp->cl_minorversion]);
+	nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
 
 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);

@@ -1211,8 +1237,8 @@ restart:
 static int nfs4_check_lease(struct nfs_client *clp)
 {
 	struct rpc_cred *cred;
-	struct nfs4_state_maintenance_ops *ops =
-		nfs4_state_renewal_ops[clp->cl_minorversion];
+	const struct nfs4_state_maintenance_ops *ops =
+		clp->cl_mvops->state_renewal_ops;
 	int status = -NFS4ERR_EXPIRED;
 
 	/* Is the client already known to have an expired lease? */

@@ -1235,8 +1261,8 @@ out:
 static int nfs4_reclaim_lease(struct nfs_client *clp)
 {
 	struct rpc_cred *cred;
-	struct nfs4_state_recovery_ops *ops =
-		nfs4_reboot_recovery_ops[clp->cl_minorversion];
+	const struct nfs4_state_recovery_ops *ops =
+		clp->cl_mvops->reboot_recovery_ops;
 	int status = -ENOENT;
 
 	cred = ops->get_clid_cred(clp);

@@ -1444,7 +1470,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
 		/* First recover reboot state... */
 		if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
 			status = nfs4_do_reclaim(clp,
-				nfs4_reboot_recovery_ops[clp->cl_minorversion]);
+				clp->cl_mvops->reboot_recovery_ops);
 			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
 			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
 				continue;

@@ -1458,7 +1484,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
 		/* Now recover expired state... */
 		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
 			status = nfs4_do_reclaim(clp,
-				nfs4_nograce_recovery_ops[clp->cl_minorversion]);
+				clp->cl_mvops->nograce_recovery_ops);
 			if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
 			    test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
 			    test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))

107	fs/nfs/nfs4xdr.c
@@ -202,14 +202,17 @@ static int nfs4_stat_to_errno(int);
 #define encode_link_maxsz	(op_encode_hdr_maxsz + \
 				nfs4_name_maxsz)
 #define decode_link_maxsz	(op_decode_hdr_maxsz + decode_change_info_maxsz)
+#define encode_lockowner_maxsz	(7)
 #define encode_lock_maxsz	(op_encode_hdr_maxsz + \
 				 7 + \
-				 1 + encode_stateid_maxsz + 8)
+				 1 + encode_stateid_maxsz + 1 + \
+				 encode_lockowner_maxsz)
 #define decode_lock_denied_maxsz \
 				(8 + decode_lockowner_maxsz)
 #define decode_lock_maxsz	(op_decode_hdr_maxsz + \
 				 decode_lock_denied_maxsz)
-#define encode_lockt_maxsz	(op_encode_hdr_maxsz + 12)
+#define encode_lockt_maxsz	(op_encode_hdr_maxsz + 5 + \
+				encode_lockowner_maxsz)
 #define decode_lockt_maxsz	(op_decode_hdr_maxsz + \
 				 decode_lock_denied_maxsz)
 #define encode_locku_maxsz	(op_encode_hdr_maxsz + 3 + \
@@ -217,6 +220,11 @@ static int nfs4_stat_to_errno(int);
 				 4)
 #define decode_locku_maxsz	(op_decode_hdr_maxsz + \
 				 decode_stateid_maxsz)
+#define encode_release_lockowner_maxsz \
+				(op_encode_hdr_maxsz + \
+				 encode_lockowner_maxsz)
+#define decode_release_lockowner_maxsz \
+				(op_decode_hdr_maxsz)
 #define encode_access_maxsz	(op_encode_hdr_maxsz + 1)
 #define decode_access_maxsz	(op_decode_hdr_maxsz + 2)
 #define encode_symlink_maxsz	(op_encode_hdr_maxsz + \

@@ -471,6 +479,12 @@ static int nfs4_stat_to_errno(int);
 				 decode_sequence_maxsz + \
 				 decode_putfh_maxsz + \
 				 decode_locku_maxsz)
+#define NFS4_enc_release_lockowner_sz \
+				(compound_encode_hdr_maxsz + \
+				 encode_lockowner_maxsz)
+#define NFS4_dec_release_lockowner_sz \
+				(compound_decode_hdr_maxsz + \
+				 decode_lockowner_maxsz)
 #define NFS4_enc_access_sz	(compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
|
@ -744,7 +758,7 @@ static void encode_compound_hdr(struct xdr_stream *xdr,
|
||||||
struct compound_hdr *hdr)
|
struct compound_hdr *hdr)
|
||||||
{
|
{
|
||||||
__be32 *p;
|
__be32 *p;
|
||||||
struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
|
struct rpc_auth *auth = req->rq_cred->cr_auth;
|
||||||
|
|
||||||
/* initialize running count of expected bytes in reply.
|
/* initialize running count of expected bytes in reply.
|
||||||
* NOTE: the replied tag SHOULD be the same is the one sent,
|
* NOTE: the replied tag SHOULD be the same is the one sent,
|
||||||
|
@@ -1042,6 +1056,17 @@ static inline uint64_t nfs4_lock_length(struct file_lock *fl)
 	return fl->fl_end - fl->fl_start + 1;
 }
 
+static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner)
+{
+	__be32 *p;
+
+	p = reserve_space(xdr, 28);
+	p = xdr_encode_hyper(p, lowner->clientid);
+	*p++ = cpu_to_be32(16);
+	p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+	xdr_encode_hyper(p, lowner->id);
+}
+
 /*
  * opcode,type,reclaim,offset,length,new_lock_owner = 32
  * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40
@@ -1058,14 +1083,11 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
 	p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
 	*p = cpu_to_be32(args->new_lock_owner);
 	if (args->new_lock_owner){
-		p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+32);
+		p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
 		*p++ = cpu_to_be32(args->open_seqid->sequence->counter);
 		p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE);
 		*p++ = cpu_to_be32(args->lock_seqid->sequence->counter);
-		p = xdr_encode_hyper(p, args->lock_owner.clientid);
-		*p++ = cpu_to_be32(16);
-		p = xdr_encode_opaque_fixed(p, "lock id:", 8);
-		xdr_encode_hyper(p, args->lock_owner.id);
+		encode_lockowner(xdr, &args->lock_owner);
 	}
 	else {
 		p = reserve_space(xdr, NFS4_STATEID_SIZE+4);

@@ -1080,15 +1102,12 @@ static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *ar
 {
 	__be32 *p;
 
-	p = reserve_space(xdr, 52);
+	p = reserve_space(xdr, 24);
 	*p++ = cpu_to_be32(OP_LOCKT);
 	*p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
 	p = xdr_encode_hyper(p, args->fl->fl_start);
 	p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
-	p = xdr_encode_hyper(p, args->lock_owner.clientid);
-	*p++ = cpu_to_be32(16);
-	p = xdr_encode_opaque_fixed(p, "lock id:", 8);
-	xdr_encode_hyper(p, args->lock_owner.id);
+	encode_lockowner(xdr, &args->lock_owner);
 	hdr->nops++;
 	hdr->replen += decode_lockt_maxsz;
 }
|
@ -1108,6 +1127,17 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
|
||||||
hdr->replen += decode_locku_maxsz;
|
hdr->replen += decode_locku_maxsz;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr)
|
||||||
|
{
|
||||||
|
__be32 *p;
|
||||||
|
|
||||||
|
p = reserve_space(xdr, 4);
|
||||||
|
*p = cpu_to_be32(OP_RELEASE_LOCKOWNER);
|
||||||
|
encode_lockowner(xdr, lowner);
|
||||||
|
hdr->nops++;
|
||||||
|
hdr->replen += decode_release_lockowner_maxsz;
|
||||||
|
}
|
||||||
|
|
||||||
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
|
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
|
||||||
{
|
{
|
||||||
int len = name->len;
|
int len = name->len;
|
||||||
|
@ -1172,7 +1202,7 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
clp = arg->server->nfs_client;
|
clp = arg->server->nfs_client;
|
||||||
if (clp->cl_minorversion > 0) {
|
if (clp->cl_mvops->minor_version > 0) {
|
||||||
if (nfs4_has_persistent_session(clp)) {
|
if (nfs4_has_persistent_session(clp)) {
|
||||||
*p = cpu_to_be32(NFS4_CREATE_GUARDED);
|
*p = cpu_to_be32(NFS4_CREATE_GUARDED);
|
||||||
encode_attrs(xdr, arg->u.attrs, arg->server);
|
encode_attrs(xdr, arg->u.attrs, arg->server);
|
||||||
|
@ -1324,14 +1354,14 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
|
||||||
hdr->replen += decode_putrootfh_maxsz;
|
hdr->replen += decode_putrootfh_maxsz;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
|
static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx)
|
||||||
{
|
{
|
||||||
nfs4_stateid stateid;
|
nfs4_stateid stateid;
|
||||||
__be32 *p;
|
__be32 *p;
|
||||||
|
|
||||||
p = reserve_space(xdr, NFS4_STATEID_SIZE);
|
p = reserve_space(xdr, NFS4_STATEID_SIZE);
|
||||||
if (ctx->state != NULL) {
|
if (ctx->state != NULL) {
|
||||||
nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner);
|
nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid);
|
||||||
xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
|
xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
|
||||||
} else
|
} else
|
||||||
xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
|
xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
|
||||||
|
@ -1344,7 +1374,7 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args,
|
||||||
p = reserve_space(xdr, 4);
|
p = reserve_space(xdr, 4);
|
||||||
*p = cpu_to_be32(OP_READ);
|
*p = cpu_to_be32(OP_READ);
|
||||||
|
|
||||||
encode_stateid(xdr, args->context);
|
encode_stateid(xdr, args->context, args->lock_context);
|
||||||
|
|
||||||
p = reserve_space(xdr, 12);
|
p = reserve_space(xdr, 12);
|
||||||
p = xdr_encode_hyper(p, args->offset);
|
p = xdr_encode_hyper(p, args->offset);
|
||||||
|
@ -1523,7 +1553,7 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg
|
||||||
p = reserve_space(xdr, 4);
|
p = reserve_space(xdr, 4);
|
||||||
*p = cpu_to_be32(OP_WRITE);
|
*p = cpu_to_be32(OP_WRITE);
|
||||||
|
|
||||||
encode_stateid(xdr, args->context);
|
encode_stateid(xdr, args->context, args->lock_context);
|
||||||
|
|
||||||
p = reserve_space(xdr, 16);
|
p = reserve_space(xdr, 16);
|
||||||
p = xdr_encode_hyper(p, args->offset);
|
p = xdr_encode_hyper(p, args->offset);
|
||||||
|
@ -1704,7 +1734,7 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
|
||||||
{
|
{
|
||||||
#if defined(CONFIG_NFS_V4_1)
|
#if defined(CONFIG_NFS_V4_1)
|
||||||
if (args->sa_session)
|
if (args->sa_session)
|
||||||
return args->sa_session->clp->cl_minorversion;
|
return args->sa_session->clp->cl_mvops->minor_version;
|
||||||
#endif /* CONFIG_NFS_V4_1 */
|
#endif /* CONFIG_NFS_V4_1 */
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -2048,6 +2078,20 @@ static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args)
|
||||||
|
{
|
||||||
|
struct xdr_stream xdr;
|
||||||
|
struct compound_hdr hdr = {
|
||||||
|
.minorversion = 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
|
||||||
|
encode_compound_hdr(&xdr, req, &hdr);
|
||||||
|
encode_release_lockowner(&xdr, &args->lock_owner, &hdr);
|
||||||
|
encode_nops(&hdr);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Encode a READLINK request
|
* Encode a READLINK request
|
||||||
*/
|
*/
|
||||||
|
@ -2395,7 +2439,7 @@ static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
|
||||||
{
|
{
|
||||||
struct xdr_stream xdr;
|
struct xdr_stream xdr;
|
||||||
struct compound_hdr hdr = {
|
struct compound_hdr hdr = {
|
||||||
.minorversion = args->client->cl_minorversion,
|
.minorversion = args->client->cl_mvops->minor_version,
|
||||||
};
|
};
|
||||||
|
|
||||||
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
|
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
|
||||||
|
@ -2413,7 +2457,7 @@ static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
|
||||||
{
|
{
|
||||||
struct xdr_stream xdr;
|
struct xdr_stream xdr;
|
||||||
struct compound_hdr hdr = {
|
struct compound_hdr hdr = {
|
||||||
.minorversion = args->client->cl_minorversion,
|
.minorversion = args->client->cl_mvops->minor_version,
|
||||||
};
|
};
|
||||||
|
|
||||||
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
|
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
|
||||||
|
@ -2431,7 +2475,7 @@ static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
|
||||||
{
|
{
|
||||||
struct xdr_stream xdr;
|
struct xdr_stream xdr;
|
||||||
struct compound_hdr hdr = {
|
struct compound_hdr hdr = {
|
||||||
.minorversion = session->clp->cl_minorversion,
|
.minorversion = session->clp->cl_mvops->minor_version,
|
||||||
};
|
};
|
||||||
|
|
||||||
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
|
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
|
||||||
|
@ -3973,6 +4017,11 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int decode_release_lockowner(struct xdr_stream *xdr)
|
||||||
|
{
|
||||||
|
return decode_op_hdr(xdr, OP_RELEASE_LOCKOWNER);
|
||||||
|
}
|
||||||
|
|
||||||
static int decode_lookup(struct xdr_stream *xdr)
|
static int decode_lookup(struct xdr_stream *xdr)
|
||||||
{
|
{
|
||||||
return decode_op_hdr(xdr, OP_LOOKUP);
|
return decode_op_hdr(xdr, OP_LOOKUP);
|
||||||
|
@ -5259,6 +5308,19 @@ out:
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
|
||||||
|
{
|
||||||
|
struct xdr_stream xdr;
|
||||||
|
struct compound_hdr hdr;
|
||||||
|
int status;
|
||||||
|
|
||||||
|
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
|
||||||
|
status = decode_compound_hdr(&xdr, &hdr);
|
||||||
|
if (!status)
|
||||||
|
status = decode_release_lockowner(&xdr);
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Decode READLINK response
|
* Decode READLINK response
|
||||||
*/
|
*/
|
||||||
|
@ -5866,6 +5928,7 @@ struct rpc_procinfo nfs4_procedures[] = {
|
||||||
PROC(GETACL, enc_getacl, dec_getacl),
|
PROC(GETACL, enc_getacl, dec_getacl),
|
||||||
PROC(SETACL, enc_setacl, dec_setacl),
|
PROC(SETACL, enc_setacl, dec_setacl),
|
||||||
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
|
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
|
||||||
|
PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
|
||||||
#if defined(CONFIG_NFS_V4_1)
|
#if defined(CONFIG_NFS_V4_1)
|
||||||
PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
|
PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
|
||||||
PROC(CREATE_SESSION, enc_create_session, dec_create_session),
|
PROC(CREATE_SESSION, enc_create_session, dec_create_session),
|
||||||
|
|
|
@@ -79,6 +79,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 req->wb_pgbase = offset;
 req->wb_bytes = count;
 req->wb_context = get_nfs_open_context(ctx);
+req->wb_lock_context = nfs_get_lock_context(ctx);
 kref_init(&req->wb_kref);
 return req;
 }
@@ -141,11 +142,16 @@ void nfs_clear_request(struct nfs_page *req)
 {
 struct page *page = req->wb_page;
 struct nfs_open_context *ctx = req->wb_context;
+struct nfs_lock_context *l_ctx = req->wb_lock_context;
 
 if (page != NULL) {
 page_cache_release(page);
 req->wb_page = NULL;
 }
+if (l_ctx != NULL) {
+nfs_put_lock_context(l_ctx);
+req->wb_lock_context = NULL;
+}
 if (ctx != NULL) {
 put_nfs_open_context(ctx);
 req->wb_context = NULL;
@@ -235,7 +241,7 @@ static int nfs_can_coalesce_requests(struct nfs_page *prev,
 {
 if (req->wb_context->cred != prev->wb_context->cred)
 return 0;
-if (req->wb_context->lockowner != prev->wb_context->lockowner)
+if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
 return 0;
 if (req->wb_context->state != prev->wb_context->state)
 return 0;

@@ -190,6 +190,7 @@ static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
 data->args.pages = data->pagevec;
 data->args.count = count;
 data->args.context = get_nfs_open_context(req->wb_context);
+data->args.lock_context = req->wb_lock_context;
 
 data->res.fattr = &data->fattr;
 data->res.count = count;
@@ -410,7 +411,7 @@ void nfs_read_prepare(struct rpc_task *task, void *calldata)
 {
 struct nfs_read_data *data = calldata;
 
-if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
+if (nfs4_setup_sequence(NFS_SERVER(data->inode),
 &data->args.seq_args, &data->res.seq_res,
 0, task))
 return;

@@ -546,6 +546,9 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
 {
 struct sockaddr *sap = (struct sockaddr *)&nfss->mountd_address;
 
+if (nfss->flags & NFS_MOUNT_LEGACY_INTERFACE)
+return;
+
 switch (sap->sa_family) {
 case AF_INET: {
 struct sockaddr_in *sin = (struct sockaddr_in *)sap;
@@ -1780,6 +1783,7 @@ static int nfs_validate_mount_data(void *options,
 * can deal with.
 */
 args->flags = data->flags & NFS_MOUNT_FLAGMASK;
+args->flags |= NFS_MOUNT_LEGACY_INTERFACE;
 args->rsize = data->rsize;
 args->wsize = data->wsize;
 args->timeo = data->timeo;

@@ -110,7 +110,7 @@ void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
 struct nfs_unlinkdata *data = calldata;
 struct nfs_server *server = NFS_SERVER(data->dir);
 
-if (nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
+if (nfs4_setup_sequence(server, &data->args.seq_args,
 &data->res.seq_res, 1, task))
 return;
 rpc_call_start(task);
@@ -700,7 +700,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 req = nfs_page_find_request(page);
 if (req == NULL)
 return 0;
-do_flush = req->wb_page != page || req->wb_context != ctx;
+do_flush = req->wb_page != page || req->wb_context != ctx ||
+req->wb_lock_context->lockowner != current->files ||
+req->wb_lock_context->pid != current->tgid;
 nfs_release_request(req);
 if (!do_flush)
 return 0;
@@ -824,6 +826,7 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
 data->args.pages = data->pagevec;
 data->args.count = count;
 data->args.context = get_nfs_open_context(req->wb_context);
+data->args.lock_context = req->wb_lock_context;
 data->args.stable = NFS_UNSTABLE;
 if (how & FLUSH_STABLE) {
 data->args.stable = NFS_DATA_SYNC;
@@ -1047,9 +1050,9 @@ out:
 void nfs_write_prepare(struct rpc_task *task, void *calldata)
 {
 struct nfs_write_data *data = calldata;
-struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;
 
-if (nfs4_setup_sequence(clp, &data->args.seq_args,
+if (nfs4_setup_sequence(NFS_SERVER(data->inode),
+&data->args.seq_args,
 &data->res.seq_res, 1, task))
 return;
 rpc_call_start(task);
@@ -523,6 +523,7 @@ enum {
 NFSPROC4_CLNT_GETACL,
 NFSPROC4_CLNT_SETACL,
 NFSPROC4_CLNT_FS_LOCATIONS,
+NFSPROC4_CLNT_RELEASE_LOCKOWNER,
 
 /* nfs41 */
 NFSPROC4_CLNT_EXCHANGE_ID,

@@ -72,13 +72,20 @@ struct nfs_access_entry {
 int mask;
 };
 
+struct nfs_lock_context {
+atomic_t count;
+struct list_head list;
+struct nfs_open_context *open_context;
+fl_owner_t lockowner;
+pid_t pid;
+};
+
 struct nfs4_state;
 struct nfs_open_context {
-atomic_t count;
+struct nfs_lock_context lock_context;
 struct path path;
 struct rpc_cred *cred;
 struct nfs4_state *state;
-fl_owner_t lockowner;
 fmode_t mode;
 
 unsigned long flags;
@@ -353,6 +360,8 @@ extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
 extern void put_nfs_open_context(struct nfs_open_context *ctx);
 extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
+extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
+extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
 extern u64 nfs_compat_user_ino64(u64 fileid);
 extern void nfs_fattr_init(struct nfs_fattr *fattr);

@@ -15,6 +15,7 @@ struct nlm_host;
 struct nfs4_sequence_args;
 struct nfs4_sequence_res;
 struct nfs_server;
+struct nfs4_minor_version_ops;
 
 /*
 * The nfs_client identifies our client state to the server.
@@ -70,11 +71,7 @@ struct nfs_client {
 */
 char cl_ipaddr[48];
 unsigned char cl_id_uniquifier;
-int (* cl_call_sync)(struct nfs_server *server,
-struct rpc_message *msg,
-struct nfs4_sequence_args *args,
-struct nfs4_sequence_res *res,
-int cache_reply);
+const struct nfs4_minor_version_ops *cl_mvops;
 #endif /* CONFIG_NFS_V4 */
 
 #ifdef CONFIG_NFS_V4_1

@@ -69,5 +69,6 @@ struct nfs_mount_data {
 #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
 #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000
 #define NFS_MOUNT_NORESVPORT 0x40000
+#define NFS_MOUNT_LEGACY_INTERFACE 0x80000
 
 #endif

@@ -39,6 +39,7 @@ struct nfs_page {
 struct list_head wb_list; /* Defines state of page: */
 struct page *wb_page; /* page to read in/write out */
 struct nfs_open_context *wb_context; /* File state context info */
+struct nfs_lock_context *wb_lock_context; /* lock context info */
 atomic_t wb_complete; /* i/os we're waiting for */
 pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */
 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
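The hunks above introduce struct nfs_lock_context plus the nfs_get_lock_context()/nfs_put_lock_context() helpers, and hang one such context off every struct nfs_page through the new wb_lock_context field. What follows is only an illustrative sketch of the intended get/put pairing, mirroring the nfs_create_request()/nfs_clear_request() changes earlier in this series; the function names and the assumption that nfs_get_lock_context() returns NULL on allocation failure are mine, not part of the patch.

static int example_attach_lock_context(struct nfs_page *req,
                                       struct nfs_open_context *ctx)
{
	/* take a counted reference on the per-owner lock context */
	req->wb_lock_context = nfs_get_lock_context(ctx);
	if (req->wb_lock_context == NULL)
		return -ENOMEM;		/* assumed failure convention */
	return 0;
}

static void example_detach_lock_context(struct nfs_page *req)
{
	if (req->wb_lock_context != NULL) {
		nfs_put_lock_context(req->wb_lock_context);	/* drop the reference */
		req->wb_lock_context = NULL;
	}
}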
@@ -196,8 +196,10 @@ struct nfs_openargs {
 __u64 clientid;
 __u64 id;
 union {
-struct iattr * attrs; /* UNCHECKED, GUARDED */
-nfs4_verifier verifier; /* EXCLUSIVE */
+struct {
+struct iattr * attrs; /* UNCHECKED, GUARDED */
+nfs4_verifier verifier; /* EXCLUSIVE */
+};
 nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
 fmode_t delegation_type; /* CLAIM_PREVIOUS */
 } u;
@@ -313,6 +315,10 @@ struct nfs_lockt_res {
 struct nfs4_sequence_res seq_res;
 };
 
+struct nfs_release_lockowner_args {
+struct nfs_lowner lock_owner;
+};
+
 struct nfs4_delegreturnargs {
 const struct nfs_fh *fhandle;
 const nfs4_stateid *stateid;
@@ -332,6 +338,7 @@ struct nfs4_delegreturnres {
 struct nfs_readargs {
 struct nfs_fh * fh;
 struct nfs_open_context *context;
+struct nfs_lock_context *lock_context;
 __u64 offset;
 __u32 count;
 unsigned int pgbase;
@@ -352,6 +359,7 @@ struct nfs_readres {
 struct nfs_writeargs {
 struct nfs_fh * fh;
 struct nfs_open_context *context;
+struct nfs_lock_context *lock_context;
 __u64 offset;
 __u32 count;
 enum nfs3_stable_how stable;
@@ -61,13 +61,7 @@ struct rpc_cred {
 /*
 * Client authentication handle
 */
-#define RPC_CREDCACHE_HASHBITS 4
-#define RPC_CREDCACHE_NR (1 << RPC_CREDCACHE_HASHBITS)
-struct rpc_cred_cache {
-struct hlist_head hashtable[RPC_CREDCACHE_NR];
-spinlock_t lock;
-};
+struct rpc_cred_cache;
 
 struct rpc_authops;
 struct rpc_auth {
 unsigned int au_cslack; /* call cred size estimate */
@@ -112,7 +106,7 @@ struct rpc_credops {
 void (*crdestroy)(struct rpc_cred *);
 
 int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
-void (*crbind)(struct rpc_task *, struct rpc_cred *, int);
+struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int);
 __be32 * (*crmarshal)(struct rpc_task *, __be32 *);
 int (*crrefresh)(struct rpc_task *);
 __be32 * (*crvalidate)(struct rpc_task *, __be32 *);
@@ -125,11 +119,12 @@ struct rpc_credops {
 extern const struct rpc_authops authunix_ops;
 extern const struct rpc_authops authnull_ops;
 
-void __init rpc_init_authunix(void);
-void __init rpc_init_generic_auth(void);
-void __init rpcauth_init_module(void);
+int __init rpc_init_authunix(void);
+int __init rpc_init_generic_auth(void);
+int __init rpcauth_init_module(void);
 void __exit rpcauth_remove_module(void);
 void __exit rpc_destroy_generic_auth(void);
+void rpc_destroy_authunix(void);
 
 struct rpc_cred * rpc_lookup_cred(void);
 struct rpc_cred * rpc_lookup_machine_cred(void);
@@ -140,10 +135,8 @@ void rpcauth_release(struct rpc_auth *);
 struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int);
 void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
 struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
-void rpcauth_bindcred(struct rpc_task *, struct rpc_cred *, int);
-void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
+struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
 void put_rpccred(struct rpc_cred *);
-void rpcauth_unbindcred(struct rpc_task *);
 __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
 __be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
 int rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj);
@@ -131,6 +131,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
 void rpc_shutdown_client(struct rpc_clnt *);
 void rpc_release_client(struct rpc_clnt *);
+void rpc_task_release_client(struct rpc_task *);
 
 int rpcb_register(u32, u32, int, unsigned short);
 int rpcb_v4_register(const u32 program, const u32 version,
@@ -148,8 +149,8 @@ int rpc_call_sync(struct rpc_clnt *clnt,
 const struct rpc_message *msg, int flags);
 struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
 int flags);
-void rpc_restart_call_prepare(struct rpc_task *);
-void rpc_restart_call(struct rpc_task *);
+int rpc_restart_call_prepare(struct rpc_task *);
+int rpc_restart_call(struct rpc_task *);
 void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
 size_t rpc_max_payload(struct rpc_clnt *);
 void rpc_force_rebind(struct rpc_clnt *);

@@ -213,6 +213,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
 const struct rpc_call_ops *ops);
 void rpc_put_task(struct rpc_task *);
 void rpc_exit_task(struct rpc_task *);
+void rpc_exit(struct rpc_task *, int);
 void rpc_release_calldata(const struct rpc_call_ops *, void *);
 void rpc_killall_tasks(struct rpc_clnt *);
 void rpc_execute(struct rpc_task *);
@@ -241,12 +242,6 @@ void rpc_destroy_mempool(void);
 extern struct workqueue_struct *rpciod_workqueue;
 void rpc_prepare_task(struct rpc_task *task);
 
-static inline void rpc_exit(struct rpc_task *task, int status)
-{
-task->tk_status = status;
-task->tk_action = rpc_exit_task;
-}
-
 static inline int rpc_wait_for_completion_task(struct rpc_task *task)
 {
 return __rpc_wait_for_completion_task(task, NULL);

@@ -64,6 +64,7 @@ struct rpc_rqst {
 * This is the private part
 */
 struct rpc_task * rq_task; /* RPC task data */
+struct rpc_cred * rq_cred; /* Bound cred */
 __be32 rq_xid; /* request XID */
 int rq_cong; /* has incremented xprt->cong */
 u32 rq_seqno; /* gss seq no. used on req. */
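With the prototype changes above, rpc_restart_call() and rpc_restart_call_prepare() now report whether the restart was actually queued, and rpc_exit() becomes a real exported function instead of a static inline. A hedged caller sketch follows; the "non-zero on success" reading comes from the changelog, not from this hunk, so treat it as an assumption.

static void example_retry(struct rpc_task *task)
{
	if (!rpc_restart_call(task))
		return;	/* restart refused (e.g. the task was killed); leave its exit path alone */
	/* otherwise the task re-enters its call path on the next scheduler step */
}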
@@ -19,6 +19,15 @@
 # define RPCDBG_FACILITY RPCDBG_AUTH
 #endif
 
+#define RPC_CREDCACHE_DEFAULT_HASHBITS (4)
+struct rpc_cred_cache {
+struct hlist_head *hashtable;
+unsigned int hashbits;
+spinlock_t lock;
+};
+
+static unsigned int auth_hashbits = RPC_CREDCACHE_DEFAULT_HASHBITS;
+
 static DEFINE_SPINLOCK(rpc_authflavor_lock);
 static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
 &authnull_ops, /* AUTH_NULL */
@@ -29,6 +38,42 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
 static LIST_HEAD(cred_unused);
 static unsigned long number_cred_unused;
 
+#define MAX_HASHTABLE_BITS (10)
+static int param_set_hashtbl_sz(const char *val, struct kernel_param *kp)
+{
+unsigned long num;
+unsigned int nbits;
+int ret;
+
+if (!val)
+goto out_inval;
+ret = strict_strtoul(val, 0, &num);
+if (ret == -EINVAL)
+goto out_inval;
+nbits = fls(num);
+if (num > (1U << nbits))
+nbits++;
+if (nbits > MAX_HASHTABLE_BITS || nbits < 2)
+goto out_inval;
+*(unsigned int *)kp->arg = nbits;
+return 0;
+out_inval:
+return -EINVAL;
+}
+
+static int param_get_hashtbl_sz(char *buffer, struct kernel_param *kp)
+{
+unsigned int nbits;
+
+nbits = *(unsigned int *)kp->arg;
+return sprintf(buffer, "%u", 1U << nbits);
+}
+
+#define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int);
+
+module_param_named(auth_hashtable_size, auth_hashbits, hashtbl_sz, 0644);
+MODULE_PARM_DESC(auth_hashtable_size, "RPC credential cache hashtable size");
+
 static u32
 pseudoflavor_to_flavor(u32 flavor) {
 if (flavor >= RPC_AUTH_MAXFLAVOR)
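The new auth_hashtable_size module parameter above stores a bit count (auth_hashbits) internally and rounds any requested table size up to a power of two via fls(). A small stand-alone replica of that rounding is sketched below, useful for predicting what a given setting produces; fls() is open-coded here because it is a kernel helper, and the example value is hypothetical.

#include <stdio.h>

static unsigned int fls_ulong(unsigned long x)	/* same result as kernel fls() for x > 0 */
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long num = 100;	/* hypothetical auth_hashtable_size request */
	unsigned int nbits = fls_ulong(num);

	if (num > (1UL << nbits))	/* mirrors param_set_hashtbl_sz() above */
		nbits++;
	/* prints: request 100 -> 7 bits -> 128 buckets */
	printf("request %lu -> %u bits -> %lu buckets\n", num, nbits, 1UL << nbits);
	return 0;
}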
@@ -145,16 +190,23 @@ int
 rpcauth_init_credcache(struct rpc_auth *auth)
 {
 struct rpc_cred_cache *new;
-int i;
+unsigned int hashsize;
 
 new = kmalloc(sizeof(*new), GFP_KERNEL);
 if (!new)
-return -ENOMEM;
-for (i = 0; i < RPC_CREDCACHE_NR; i++)
-INIT_HLIST_HEAD(&new->hashtable[i]);
+goto out_nocache;
+new->hashbits = auth_hashbits;
+hashsize = 1U << new->hashbits;
+new->hashtable = kcalloc(hashsize, sizeof(new->hashtable[0]), GFP_KERNEL);
+if (!new->hashtable)
+goto out_nohashtbl;
 spin_lock_init(&new->lock);
 auth->au_credcache = new;
 return 0;
+out_nohashtbl:
+kfree(new);
+out_nocache:
+return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(rpcauth_init_credcache);
 
@@ -183,11 +235,12 @@ rpcauth_clear_credcache(struct rpc_cred_cache *cache)
 LIST_HEAD(free);
 struct hlist_head *head;
 struct rpc_cred *cred;
+unsigned int hashsize = 1U << cache->hashbits;
 int i;
 
 spin_lock(&rpc_credcache_lock);
 spin_lock(&cache->lock);
-for (i = 0; i < RPC_CREDCACHE_NR; i++) {
+for (i = 0; i < hashsize; i++) {
 head = &cache->hashtable[i];
 while (!hlist_empty(head)) {
 cred = hlist_entry(head->first, struct rpc_cred, cr_hash);
@@ -216,6 +269,7 @@ rpcauth_destroy_credcache(struct rpc_auth *auth)
 if (cache) {
 auth->au_credcache = NULL;
 rpcauth_clear_credcache(cache);
+kfree(cache->hashtable);
 kfree(cache);
 }
 }
@@ -297,7 +351,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
 *entry, *new;
 unsigned int nr;
 
-nr = hash_long(acred->uid, RPC_CREDCACHE_HASHBITS);
+nr = hash_long(acred->uid, cache->hashbits);
 
 rcu_read_lock();
 hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
@@ -390,16 +444,16 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
 }
 EXPORT_SYMBOL_GPL(rpcauth_init_cred);
 
-void
+struct rpc_cred *
 rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
 {
-task->tk_msg.rpc_cred = get_rpccred(cred);
 dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
 cred->cr_auth->au_ops->au_name, cred);
+return get_rpccred(cred);
 }
 EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);
 
-static void
+static struct rpc_cred *
 rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
 {
 struct rpc_auth *auth = task->tk_client->cl_auth;
@@ -407,45 +461,43 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
 .uid = 0,
 .gid = 0,
 };
-struct rpc_cred *ret;
 
 dprintk("RPC: %5u looking up %s cred\n",
 task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
-ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
-if (!IS_ERR(ret))
-task->tk_msg.rpc_cred = ret;
-else
-task->tk_status = PTR_ERR(ret);
+return auth->au_ops->lookup_cred(auth, &acred, lookupflags);
 }
 
-static void
+static struct rpc_cred *
 rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
 {
 struct rpc_auth *auth = task->tk_client->cl_auth;
-struct rpc_cred *ret;
 
 dprintk("RPC: %5u looking up %s cred\n",
 task->tk_pid, auth->au_ops->au_name);
-ret = rpcauth_lookupcred(auth, lookupflags);
-if (!IS_ERR(ret))
-task->tk_msg.rpc_cred = ret;
-else
-task->tk_status = PTR_ERR(ret);
+return rpcauth_lookupcred(auth, lookupflags);
 }
 
-void
+static int
 rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
 {
+struct rpc_rqst *req = task->tk_rqstp;
+struct rpc_cred *new;
 int lookupflags = 0;
 
 if (flags & RPC_TASK_ASYNC)
 lookupflags |= RPCAUTH_LOOKUP_NEW;
 if (cred != NULL)
-cred->cr_ops->crbind(task, cred, lookupflags);
+new = cred->cr_ops->crbind(task, cred, lookupflags);
 else if (flags & RPC_TASK_ROOTCREDS)
-rpcauth_bind_root_cred(task, lookupflags);
+new = rpcauth_bind_root_cred(task, lookupflags);
 else
-rpcauth_bind_new_cred(task, lookupflags);
+new = rpcauth_bind_new_cred(task, lookupflags);
+if (IS_ERR(new))
+return PTR_ERR(new);
+if (req->rq_cred != NULL)
+put_rpccred(req->rq_cred);
+req->rq_cred = new;
+return 0;
 }
 
 void
@@ -484,22 +536,10 @@ out_nodestroy:
 }
 EXPORT_SYMBOL_GPL(put_rpccred);
 
-void
-rpcauth_unbindcred(struct rpc_task *task)
-{
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
-
-dprintk("RPC: %5u releasing %s cred %p\n",
-task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
-
-put_rpccred(cred);
-task->tk_msg.rpc_cred = NULL;
-}
-
 __be32 *
 rpcauth_marshcred(struct rpc_task *task, __be32 *p)
 {
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
+struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 
 dprintk("RPC: %5u marshaling %s cred %p\n",
 task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
@@ -510,7 +550,7 @@ rpcauth_marshcred(struct rpc_task *task, __be32 *p)
 __be32 *
 rpcauth_checkverf(struct rpc_task *task, __be32 *p)
 {
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
+struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 
 dprintk("RPC: %5u validating %s cred %p\n",
 task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
@@ -522,7 +562,7 @@ int
 rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
 __be32 *data, void *obj)
 {
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
+struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 
 dprintk("RPC: %5u using %s cred %p to wrap rpc data\n",
 task->tk_pid, cred->cr_ops->cr_name, cred);
@@ -536,7 +576,7 @@ int
 rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
 __be32 *data, void *obj)
 {
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
+struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 
 dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n",
 task->tk_pid, cred->cr_ops->cr_name, cred);
@@ -550,13 +590,21 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
 int
 rpcauth_refreshcred(struct rpc_task *task)
 {
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
+struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 int err;
 
+cred = task->tk_rqstp->rq_cred;
+if (cred == NULL) {
+err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags);
+if (err < 0)
+goto out;
+cred = task->tk_rqstp->rq_cred;
+};
 dprintk("RPC: %5u refreshing %s cred %p\n",
 task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
 
 err = cred->cr_ops->crrefresh(task);
+out:
 if (err < 0)
 task->tk_status = err;
 return err;
@@ -565,7 +613,7 @@ rpcauth_refreshcred(struct rpc_task *task)
 void
 rpcauth_invalcred(struct rpc_task *task)
 {
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
+struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 
 dprintk("RPC: %5u invalidating %s cred %p\n",
 task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
@@ -576,7 +624,7 @@ rpcauth_invalcred(struct rpc_task *task)
 int
 rpcauth_uptodatecred(struct rpc_task *task)
 {
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
+struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 
 return cred == NULL ||
 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
@@ -587,14 +635,27 @@ static struct shrinker rpc_cred_shrinker = {
 .seeks = DEFAULT_SEEKS,
 };
 
-void __init rpcauth_init_module(void)
+int __init rpcauth_init_module(void)
 {
-rpc_init_authunix();
-rpc_init_generic_auth();
+int err;
+
+err = rpc_init_authunix();
+if (err < 0)
+goto out1;
+err = rpc_init_generic_auth();
+if (err < 0)
+goto out2;
 register_shrinker(&rpc_cred_shrinker);
+return 0;
+out2:
+rpc_destroy_authunix();
+out1:
+return err;
 }
 
 void __exit rpcauth_remove_module(void)
 {
+rpc_destroy_authunix();
+rpc_destroy_generic_auth();
 unregister_shrinker(&rpc_cred_shrinker);
 }

@@ -27,7 +27,6 @@ struct generic_cred {
 };
 
 static struct rpc_auth generic_auth;
-static struct rpc_cred_cache generic_cred_cache;
 static const struct rpc_credops generic_credops;
 
 /*
@@ -55,18 +54,13 @@ struct rpc_cred *rpc_lookup_machine_cred(void)
 }
 EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
 
-static void
-generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
+static struct rpc_cred *generic_bind_cred(struct rpc_task *task,
+struct rpc_cred *cred, int lookupflags)
 {
 struct rpc_auth *auth = task->tk_client->cl_auth;
 struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred;
-struct rpc_cred *ret;
 
-ret = auth->au_ops->lookup_cred(auth, acred, lookupflags);
-if (!IS_ERR(ret))
-task->tk_msg.rpc_cred = ret;
-else
-task->tk_status = PTR_ERR(ret);
+return auth->au_ops->lookup_cred(auth, acred, lookupflags);
 }
 
 /*
@@ -159,20 +153,16 @@ out_nomatch:
 return 0;
 }
 
-void __init rpc_init_generic_auth(void)
+int __init rpc_init_generic_auth(void)
 {
-spin_lock_init(&generic_cred_cache.lock);
+return rpcauth_init_credcache(&generic_auth);
 }
 
 void __exit rpc_destroy_generic_auth(void)
 {
-rpcauth_clear_credcache(&generic_cred_cache);
+rpcauth_destroy_credcache(&generic_auth);
 }
 
-static struct rpc_cred_cache generic_cred_cache = {
-{{ NULL, },},
-};
-
 static const struct rpc_authops generic_auth_ops = {
 .owner = THIS_MODULE,
 .au_name = "Generic",
@@ -183,7 +173,6 @@ static const struct rpc_authops generic_auth_ops = {
 static struct rpc_auth generic_auth = {
 .au_ops = &generic_auth_ops,
 .au_count = ATOMIC_INIT(0),
-.au_credcache = &generic_cred_cache,
 };
 
 static const struct rpc_credops generic_credops = {
@@ -373,7 +373,7 @@ gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss
 static void
 gss_upcall_callback(struct rpc_task *task)
 {
-struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
+struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
 struct gss_cred, gc_base);
 struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
 struct inode *inode = &gss_msg->inode->vfs_inode;
@@ -502,7 +502,7 @@ static void warn_gssd(void)
 static inline int
 gss_refresh_upcall(struct rpc_task *task)
 {
-struct rpc_cred *cred = task->tk_msg.rpc_cred;
+struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 struct gss_auth *gss_auth = container_of(cred->cr_auth,
 struct gss_auth, rpc_auth);
 struct gss_cred *gss_cred = container_of(cred,
@@ -928,6 +928,7 @@ gss_do_free_ctx(struct gss_cl_ctx *ctx)
 {
 dprintk("RPC: gss_free_ctx\n");
 
+gss_delete_sec_context(&ctx->gc_gss_ctx);
 kfree(ctx->gc_wire_ctx.data);
 kfree(ctx);
 }
@@ -942,13 +943,7 @@ gss_free_ctx_callback(struct rcu_head *head)
 static void
 gss_free_ctx(struct gss_cl_ctx *ctx)
 {
-struct gss_ctx *gc_gss_ctx;
-
-gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
-rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
 call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
-if (gc_gss_ctx)
-gss_delete_sec_context(&gc_gss_ctx);
 }
 
 static void
@ -1064,12 +1059,12 @@ out:
|
||||||
static __be32 *
|
static __be32 *
|
||||||
gss_marshal(struct rpc_task *task, __be32 *p)
|
gss_marshal(struct rpc_task *task, __be32 *p)
|
||||||
{
|
{
|
||||||
struct rpc_cred *cred = task->tk_msg.rpc_cred;
|
struct rpc_rqst *req = task->tk_rqstp;
|
||||||
|
struct rpc_cred *cred = req->rq_cred;
|
||||||
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
||||||
gc_base);
|
gc_base);
|
||||||
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
||||||
__be32 *cred_len;
|
__be32 *cred_len;
|
||||||
struct rpc_rqst *req = task->tk_rqstp;
|
|
||||||
u32 maj_stat = 0;
|
u32 maj_stat = 0;
|
||||||
struct xdr_netobj mic;
|
struct xdr_netobj mic;
|
||||||
struct kvec iov;
|
struct kvec iov;
|
||||||
|
@ -1119,7 +1114,7 @@ out_put_ctx:
|
||||||
|
|
||||||
static int gss_renew_cred(struct rpc_task *task)
|
static int gss_renew_cred(struct rpc_task *task)
|
||||||
{
|
{
|
||||||
struct rpc_cred *oldcred = task->tk_msg.rpc_cred;
|
struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
|
||||||
struct gss_cred *gss_cred = container_of(oldcred,
|
struct gss_cred *gss_cred = container_of(oldcred,
|
||||||
struct gss_cred,
|
struct gss_cred,
|
||||||
gc_base);
|
gc_base);
|
||||||
|
@ -1133,7 +1128,7 @@ static int gss_renew_cred(struct rpc_task *task)
|
||||||
new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
|
new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
|
||||||
if (IS_ERR(new))
|
if (IS_ERR(new))
|
||||||
return PTR_ERR(new);
|
return PTR_ERR(new);
|
||||||
task->tk_msg.rpc_cred = new;
|
task->tk_rqstp->rq_cred = new;
|
||||||
put_rpccred(oldcred);
|
put_rpccred(oldcred);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -1161,7 +1156,7 @@ static int gss_cred_is_negative_entry(struct rpc_cred *cred)
|
||||||
static int
|
static int
|
||||||
gss_refresh(struct rpc_task *task)
|
gss_refresh(struct rpc_task *task)
|
||||||
{
|
{
|
||||||
struct rpc_cred *cred = task->tk_msg.rpc_cred;
|
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
if (gss_cred_is_negative_entry(cred))
|
if (gss_cred_is_negative_entry(cred))
|
||||||
|
@ -1172,7 +1167,7 @@ gss_refresh(struct rpc_task *task)
|
||||||
ret = gss_renew_cred(task);
|
ret = gss_renew_cred(task);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto out;
|
goto out;
|
||||||
cred = task->tk_msg.rpc_cred;
|
cred = task->tk_rqstp->rq_cred;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
|
if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
|
||||||
|
@ -1191,7 +1186,7 @@ gss_refresh_null(struct rpc_task *task)
|
||||||
static __be32 *
|
static __be32 *
|
||||||
gss_validate(struct rpc_task *task, __be32 *p)
|
gss_validate(struct rpc_task *task, __be32 *p)
|
||||||
{
|
{
|
||||||
struct rpc_cred *cred = task->tk_msg.rpc_cred;
|
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
||||||
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
||||||
__be32 seq;
|
__be32 seq;
|
||||||
struct kvec iov;
|
struct kvec iov;
|
||||||
|
@ -1400,7 +1395,7 @@ static int
|
||||||
gss_wrap_req(struct rpc_task *task,
|
gss_wrap_req(struct rpc_task *task,
|
||||||
kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
|
kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
|
||||||
{
|
{
|
||||||
struct rpc_cred *cred = task->tk_msg.rpc_cred;
|
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
||||||
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
||||||
gc_base);
|
gc_base);
|
||||||
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
||||||
|
@ -1503,7 +1498,7 @@ static int
|
||||||
gss_unwrap_resp(struct rpc_task *task,
|
gss_unwrap_resp(struct rpc_task *task,
|
||||||
kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
|
kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
|
||||||
{
|
{
|
||||||
struct rpc_cred *cred = task->tk_msg.rpc_cred;
|
struct rpc_cred *cred = task->tk_rqstp->rq_cred;
|
||||||
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
|
||||||
gc_base);
|
gc_base);
|
||||||
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
|
||||||
|
|
|
@ -75,7 +75,7 @@ nul_marshal(struct rpc_task *task, __be32 *p)
|
||||||
static int
|
static int
|
||||||
nul_refresh(struct rpc_task *task)
|
nul_refresh(struct rpc_task *task)
|
||||||
{
|
{
|
||||||
set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags);
|
set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -29,7 +29,6 @@ struct unx_cred {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
static struct rpc_auth unix_auth;
|
static struct rpc_auth unix_auth;
|
||||||
static struct rpc_cred_cache unix_cred_cache;
|
|
||||||
static const struct rpc_credops unix_credops;
|
static const struct rpc_credops unix_credops;
|
||||||
|
|
||||||
static struct rpc_auth *
|
static struct rpc_auth *
|
||||||
|
@ -141,7 +140,7 @@ static __be32 *
|
||||||
unx_marshal(struct rpc_task *task, __be32 *p)
|
unx_marshal(struct rpc_task *task, __be32 *p)
|
||||||
{
|
{
|
||||||
struct rpc_clnt *clnt = task->tk_client;
|
struct rpc_clnt *clnt = task->tk_client;
|
||||||
struct unx_cred *cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base);
|
struct unx_cred *cred = container_of(task->tk_rqstp->rq_cred, struct unx_cred, uc_base);
|
||||||
__be32 *base, *hold;
|
__be32 *base, *hold;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
|
@ -174,7 +173,7 @@ unx_marshal(struct rpc_task *task, __be32 *p)
|
||||||
static int
|
static int
|
||||||
unx_refresh(struct rpc_task *task)
|
unx_refresh(struct rpc_task *task)
|
||||||
{
|
{
|
||||||
set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags);
|
set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -197,15 +196,20 @@ unx_validate(struct rpc_task *task, __be32 *p)
|
||||||
printk("RPC: giant verf size: %u\n", size);
|
printk("RPC: giant verf size: %u\n", size);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
task->tk_msg.rpc_cred->cr_auth->au_rslack = (size >> 2) + 2;
|
task->tk_rqstp->rq_cred->cr_auth->au_rslack = (size >> 2) + 2;
|
||||||
p += (size >> 2);
|
p += (size >> 2);
|
||||||
|
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
||||||
void __init rpc_init_authunix(void)
|
int __init rpc_init_authunix(void)
|
||||||
{
|
{
|
||||||
spin_lock_init(&unix_cred_cache.lock);
|
return rpcauth_init_credcache(&unix_auth);
|
||||||
|
}
|
||||||
|
|
||||||
|
void rpc_destroy_authunix(void)
|
||||||
|
{
|
||||||
|
rpcauth_destroy_credcache(&unix_auth);
|
||||||
}
|
}
|
||||||
|
|
||||||
const struct rpc_authops authunix_ops = {
|
const struct rpc_authops authunix_ops = {
|
||||||
|
@ -218,10 +222,6 @@ const struct rpc_authops authunix_ops = {
|
||||||
.crcreate = unx_create_cred,
|
.crcreate = unx_create_cred,
|
||||||
};
|
};
|
||||||
|
|
||||||
static
|
|
||||||
struct rpc_cred_cache unix_cred_cache = {
|
|
||||||
};
|
|
||||||
|
|
||||||
static
|
static
|
||||||
struct rpc_auth unix_auth = {
|
struct rpc_auth unix_auth = {
|
||||||
.au_cslack = UNX_WRITESLACK,
|
.au_cslack = UNX_WRITESLACK,
|
||||||
|
@ -229,7 +229,6 @@ struct rpc_auth unix_auth = {
|
||||||
.au_ops = &authunix_ops,
|
.au_ops = &authunix_ops,
|
||||||
.au_flavor = RPC_AUTH_UNIX,
|
.au_flavor = RPC_AUTH_UNIX,
|
||||||
.au_count = ATOMIC_INIT(0),
|
.au_count = ATOMIC_INIT(0),
|
||||||
.au_credcache = &unix_cred_cache,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static
|
static
|
||||||
|
|
|
@@ -413,6 +413,35 @@ out_no_clnt:
 }
 EXPORT_SYMBOL_GPL(rpc_clone_client);
 
+/*
+ * Kill all tasks for the given client.
+ * XXX: kill their descendants as well?
+ */
+void rpc_killall_tasks(struct rpc_clnt *clnt)
+{
+    struct rpc_task *rovr;
+
+
+    if (list_empty(&clnt->cl_tasks))
+        return;
+    dprintk("RPC: killing all tasks for client %p\n", clnt);
+    /*
+     * Spin lock all_tasks to prevent changes...
+     */
+    spin_lock(&clnt->cl_lock);
+    list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
+        if (!RPC_IS_ACTIVATED(rovr))
+            continue;
+        if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
+            rovr->tk_flags |= RPC_TASK_KILLED;
+            rpc_exit(rovr, -EIO);
+            rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr);
+        }
+    }
+    spin_unlock(&clnt->cl_lock);
+}
+EXPORT_SYMBOL_GPL(rpc_killall_tasks);
+
 /*
  * Properly shut down an RPC client, terminating all outstanding
  * requests.
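Note: rpc_killall_tasks() moves here from sched.c; the behavioural difference is that it now wakes each killed task with rpc_wake_up_queued_task() on the task's own wait queue instead of the old rpc_wake_up_task() helper, which is deleted further down in this diff. A sketch of the intended caller-side use, assuming a shutdown path that wants to abort everything still queued on a client:

static void example_shutdown(struct rpc_clnt *clnt)
{
    rpc_killall_tasks(clnt);        /* mark tasks RPC_TASK_KILLED and rpc_exit(-EIO) them */
    rpc_shutdown_client(clnt);      /* then wait for cl_tasks to drain */
}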
@@ -538,6 +567,49 @@ out:
 }
 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
 
+void rpc_task_release_client(struct rpc_task *task)
+{
+    struct rpc_clnt *clnt = task->tk_client;
+
+    if (clnt != NULL) {
+        /* Remove from client task list */
+        spin_lock(&clnt->cl_lock);
+        list_del(&task->tk_task);
+        spin_unlock(&clnt->cl_lock);
+        task->tk_client = NULL;
+
+        rpc_release_client(clnt);
+    }
+}
+
+static
+void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
+{
+    if (clnt != NULL) {
+        rpc_task_release_client(task);
+        task->tk_client = clnt;
+        kref_get(&clnt->cl_kref);
+        if (clnt->cl_softrtry)
+            task->tk_flags |= RPC_TASK_SOFT;
+        /* Add to the client's list of all tasks */
+        spin_lock(&clnt->cl_lock);
+        list_add_tail(&task->tk_task, &clnt->cl_tasks);
+        spin_unlock(&clnt->cl_lock);
+    }
+}
+
+static void
+rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
+{
+    if (msg != NULL) {
+        task->tk_msg.rpc_proc = msg->rpc_proc;
+        task->tk_msg.rpc_argp = msg->rpc_argp;
+        task->tk_msg.rpc_resp = msg->rpc_resp;
+        if (msg->rpc_cred != NULL)
+            task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
+    }
+}
+
 /*
  * Default callback for async RPC calls
  */
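Note: binding a task to its client and copying the rpc_message out of the setup data now happen here in the client code, via rpc_task_set_client() and rpc_task_set_rpc_message(), rather than in rpc_init_task(); the task also joins clnt->cl_tasks at this point instead of in rpc_set_active(). For reference, a sketch of the caller-visible inputs these helpers consume (the procedure and argument names are placeholders, not taken from this diff):

static int example_call(struct rpc_clnt *clnt, struct rpc_procinfo *proc,
                        void *argp, void *resp, struct rpc_cred *cred)
{
    struct rpc_message msg = {
        .rpc_proc = proc,
        .rpc_argp = argp,
        .rpc_resp = resp,
        .rpc_cred = cred,       /* rpc_task_set_rpc_message() takes its own reference */
    };

    return rpc_call_sync(clnt, &msg, 0);    /* builds the rpc_task_setup internally */
}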
@@ -562,6 +634,18 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
     if (IS_ERR(task))
         goto out;
 
+    rpc_task_set_client(task, task_setup_data->rpc_client);
+    rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
+
+    if (task->tk_status != 0) {
+        int ret = task->tk_status;
+        rpc_put_task(task);
+        return ERR_PTR(ret);
+    }
+
+    if (task->tk_action == NULL)
+        rpc_call_start(task);
+
     atomic_inc(&task->tk_count);
     rpc_execute(task);
 out:
@@ -756,12 +840,13 @@ EXPORT_SYMBOL_GPL(rpc_force_rebind);
  * Restart an (async) RPC call from the call_prepare state.
  * Usually called from within the exit handler.
  */
-void
+int
 rpc_restart_call_prepare(struct rpc_task *task)
 {
     if (RPC_ASSASSINATED(task))
-        return;
+        return 0;
     task->tk_action = rpc_prepare_task;
+    return 1;
 }
 EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
 
@@ -769,13 +854,13 @@ EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
  * Restart an (async) RPC call. Usually called from within the
  * exit handler.
  */
-void
+int
 rpc_restart_call(struct rpc_task *task)
 {
     if (RPC_ASSASSINATED(task))
-        return;
-
+        return 0;
     task->tk_action = call_start;
+    return 1;
 }
 EXPORT_SYMBOL_GPL(rpc_restart_call);
 
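Note: both restart helpers now report whether the restart actually happened, so callers no longer need their own RPC_ASSASSINATED() checks. A sketch of the expected calling pattern in an rpc_call_done callback (the callback itself is illustrative, not taken from the NFS changes in this merge):

static void example_rpc_call_done(struct rpc_task *task, void *calldata)
{
    if (task->tk_status == -EAGAIN && rpc_restart_call_prepare(task)) {
        rpc_delay(task, HZ);
        return;     /* task will re-enter rpc_prepare_task */
    }
    /* a zero return means the task was assassinated: fall through
     * and complete normally instead of restarting */
}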
@@ -824,11 +909,6 @@ call_reserve(struct rpc_task *task)
 {
     dprint_status(task);
 
-    if (!rpcauth_uptodatecred(task)) {
-        task->tk_action = call_refresh;
-        return;
-    }
-
     task->tk_status = 0;
     task->tk_action = call_reserveresult;
     xprt_reserve(task);
@@ -892,7 +972,7 @@ call_reserveresult(struct rpc_task *task)
 static void
 call_allocate(struct rpc_task *task)
 {
-    unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack;
+    unsigned int slack = task->tk_client->cl_auth->au_cslack;
     struct rpc_rqst *req = task->tk_rqstp;
     struct rpc_xprt *xprt = task->tk_xprt;
     struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -900,7 +980,7 @@ call_allocate(struct rpc_task *task)
     dprint_status(task);
 
     task->tk_status = 0;
-    task->tk_action = call_bind;
+    task->tk_action = call_refresh;
 
     if (req->rq_buffer)
         return;
@@ -937,6 +1017,47 @@ call_allocate(struct rpc_task *task)
     rpc_exit(task, -ERESTARTSYS);
 }
 
+/*
+ * 2a. Bind and/or refresh the credentials
+ */
+static void
+call_refresh(struct rpc_task *task)
+{
+    dprint_status(task);
+
+    task->tk_action = call_refreshresult;
+    task->tk_status = 0;
+    task->tk_client->cl_stats->rpcauthrefresh++;
+    rpcauth_refreshcred(task);
+}
+
+/*
+ * 2b. Process the results of a credential refresh
+ */
+static void
+call_refreshresult(struct rpc_task *task)
+{
+    int status = task->tk_status;
+
+    dprint_status(task);
+
+    task->tk_status = 0;
+    task->tk_action = call_bind;
+    if (status >= 0 && rpcauth_uptodatecred(task))
+        return;
+    switch (status) {
+    case -EACCES:
+        rpc_exit(task, -EACCES);
+        return;
+    case -ENOMEM:
+        rpc_exit(task, -ENOMEM);
+        return;
+    case -ETIMEDOUT:
+        rpc_delay(task, 3*HZ);
+    }
+    task->tk_action = call_refresh;
+}
+
 static inline int
 rpc_task_need_encode(struct rpc_task *task)
 {
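Note: credential refresh moves from the front of the state machine (the old hook in call_reserve and the old call_refresh/call_refreshresult removed further down) to just after buffer allocation, presumably because the bound credential now lives in rq_cred on the rpc_rqst and therefore only exists once a transport slot has been reserved. The resulting ordering for an ordinary task, sketched as a comment and assuming no errors along the way:

/*
 * call_start -> call_reserve -> call_reserveresult -> call_allocate
 *            -> call_refresh -> call_refreshresult -> call_bind
 *            -> call_connect -> call_transmit -> ...
 */

call_refreshresult() also maps refresh failures explicitly: -EACCES and -ENOMEM terminate the task, -ETIMEDOUT retries after a 3*HZ delay, and anything else simply loops back to call_refresh.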
@@ -1472,43 +1593,6 @@ out_retry:
     }
 }
 
-/*
- * 8.  Refresh the credentials if rejected by the server
- */
-static void
-call_refresh(struct rpc_task *task)
-{
-    dprint_status(task);
-
-    task->tk_action = call_refreshresult;
-    task->tk_status = 0;
-    task->tk_client->cl_stats->rpcauthrefresh++;
-    rpcauth_refreshcred(task);
-}
-
-/*
- * 8a. Process the results of a credential refresh
- */
-static void
-call_refreshresult(struct rpc_task *task)
-{
-    int status = task->tk_status;
-
-    dprint_status(task);
-
-    task->tk_status = 0;
-    task->tk_action = call_reserve;
-    if (status >= 0 && rpcauth_uptodatecred(task))
-        return;
-    if (status == -EACCES) {
-        rpc_exit(task, -EACCES);
-        return;
-    }
-    task->tk_action = call_refresh;
-    if (status != -ETIMEDOUT)
-        rpc_delay(task, 3*HZ);
-}
-
 static __be32 *
 rpc_encode_header(struct rpc_task *task)
 {
@@ -246,17 +246,8 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
 
 static void rpc_set_active(struct rpc_task *task)
 {
-    struct rpc_clnt *clnt;
-    if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
-        return;
     rpc_task_set_debuginfo(task);
-    /* Add to global list of all tasks */
-    clnt = task->tk_client;
-    if (clnt != NULL) {
-        spin_lock(&clnt->cl_lock);
-        list_add_tail(&task->tk_task, &clnt->cl_tasks);
-        spin_unlock(&clnt->cl_lock);
-    }
+    set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
 }
 
 /*
@@ -319,11 +310,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
     dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
             task->tk_pid, rpc_qname(q), jiffies);
 
-    if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
-        printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
-        return;
-    }
-
     __rpc_add_wait_queue(q, task);
 
     BUG_ON(task->tk_callback != NULL);
@@ -334,8 +320,8 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                 rpc_action action)
 {
-    /* Mark the task as being activated if so needed */
-    rpc_set_active(task);
+    /* We shouldn't ever put an inactive task to sleep */
+    BUG_ON(!RPC_IS_ACTIVATED(task));
 
     /*
      * Protect the queue operations.
@@ -405,14 +391,6 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task
 }
 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 
-/*
- * Wake up the specified task
- */
-static void rpc_wake_up_task(struct rpc_task *task)
-{
-    rpc_wake_up_queued_task(task->tk_waitqueue, task);
-}
-
 /*
  * Wake up the next task on a priority queue.
  */
@@ -600,7 +578,15 @@ void rpc_exit_task(struct rpc_task *task)
         }
     }
 }
-EXPORT_SYMBOL_GPL(rpc_exit_task);
+
+void rpc_exit(struct rpc_task *task, int status)
+{
+    task->tk_status = status;
+    task->tk_action = rpc_exit_task;
+    if (RPC_IS_QUEUED(task))
+        rpc_wake_up_queued_task(task->tk_waitqueue, task);
+}
+EXPORT_SYMBOL_GPL(rpc_exit);
 
 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 {
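Note: rpc_exit() now lives next to rpc_exit_task() and wakes the task itself if it is sleeping on a wait queue, so a caller can abort a queued task without issuing a separate wake-up. A sketch of use from a prepare callback (struct example_data is hypothetical, not from this diff):

static void example_rpc_prepare(struct rpc_task *task, void *calldata)
{
    struct example_data *data = calldata;   /* hypothetical per-call state */

    if (data->cancelled) {
        rpc_exit(task, -EIO);   /* sets rpc_exit_task and wakes the task if queued */
        return;
    }
    rpc_call_start(task);
}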
@@ -690,7 +676,6 @@ static void __rpc_execute(struct rpc_task *task)
             dprintk("RPC: %5u got signal\n", task->tk_pid);
             task->tk_flags |= RPC_TASK_KILLED;
             rpc_exit(task, -ERESTARTSYS);
-            rpc_wake_up_task(task);
         }
         rpc_set_running(task);
         dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
@@ -714,8 +699,9 @@ static void __rpc_execute(struct rpc_task *task)
 void rpc_execute(struct rpc_task *task)
 {
     rpc_set_active(task);
-    rpc_set_running(task);
-    __rpc_execute(task);
+    rpc_make_runnable(task);
+    if (!RPC_IS_ASYNC(task))
+        __rpc_execute(task);
 }
 
 static void rpc_async_schedule(struct work_struct *work)
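Note: rpc_execute() now only makes the task runnable; the state machine is entered directly in the caller's context for synchronous tasks only, while asynchronous tasks are picked up by rpciod, keeping them off the submitter's stack. A sketch of submitting an async task under that model (the setup structure is assumed to carry valid callback_ops):

static int example_submit_async(struct rpc_task_setup *setup)
{
    struct rpc_task *task;

    setup->flags |= RPC_TASK_ASYNC;
    task = rpc_run_task(setup);
    if (IS_ERR(task))
        return PTR_ERR(task);
    rpc_put_task(task);     /* the running task keeps its own reference */
    return 0;
}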
@@ -808,26 +794,9 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
     /* Initialize workqueue for async tasks */
     task->tk_workqueue = task_setup_data->workqueue;
 
-    task->tk_client = task_setup_data->rpc_client;
-    if (task->tk_client != NULL) {
-        kref_get(&task->tk_client->cl_kref);
-        if (task->tk_client->cl_softrtry)
-            task->tk_flags |= RPC_TASK_SOFT;
-    }
-
     if (task->tk_ops->rpc_call_prepare != NULL)
         task->tk_action = rpc_prepare_task;
 
-    if (task_setup_data->rpc_message != NULL) {
-        task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
-        task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
-        task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
-        /* Bind the user cred */
-        rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
-        if (task->tk_action == NULL)
-            rpc_call_start(task);
-    }
-
     /* starting timestamp */
     task->tk_start = ktime_get();
 
@@ -896,11 +865,8 @@ void rpc_put_task(struct rpc_task *task)
     if (task->tk_rqstp)
         xprt_release(task);
     if (task->tk_msg.rpc_cred)
-        rpcauth_unbindcred(task);
-    if (task->tk_client) {
-        rpc_release_client(task->tk_client);
-        task->tk_client = NULL;
-    }
+        put_rpccred(task->tk_msg.rpc_cred);
+    rpc_task_release_client(task);
     if (task->tk_workqueue != NULL) {
         INIT_WORK(&task->u.tk_work, rpc_async_release);
         queue_work(task->tk_workqueue, &task->u.tk_work);
@@ -913,13 +879,6 @@ static void rpc_release_task(struct rpc_task *task)
 {
     dprintk("RPC: %5u release task\n", task->tk_pid);
 
-    if (!list_empty(&task->tk_task)) {
-        struct rpc_clnt *clnt = task->tk_client;
-        /* Remove from client task list */
-        spin_lock(&clnt->cl_lock);
-        list_del(&task->tk_task);
-        spin_unlock(&clnt->cl_lock);
-    }
     BUG_ON (RPC_IS_QUEUED(task));
 
     /* Wake up anyone who is waiting for task completion */
@@ -928,35 +887,6 @@ static void rpc_release_task(struct rpc_task *task)
     rpc_put_task(task);
 }
 
-/*
- * Kill all tasks for the given client.
- * XXX: kill their descendants as well?
- */
-void rpc_killall_tasks(struct rpc_clnt *clnt)
-{
-    struct rpc_task *rovr;
-
-
-    if (list_empty(&clnt->cl_tasks))
-        return;
-    dprintk("RPC: killing all tasks for client %p\n", clnt);
-    /*
-     * Spin lock all_tasks to prevent changes...
-     */
-    spin_lock(&clnt->cl_lock);
-    list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
-        if (! RPC_IS_ACTIVATED(rovr))
-            continue;
-        if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
-            rovr->tk_flags |= RPC_TASK_KILLED;
-            rpc_exit(rovr, -EIO);
-            rpc_wake_up_task(rovr);
-        }
-    }
-    spin_unlock(&clnt->cl_lock);
-}
-EXPORT_SYMBOL_GPL(rpc_killall_tasks);
-
 int rpciod_up(void)
 {
     return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
@@ -33,10 +33,11 @@ init_sunrpc(void)
     if (err)
         goto out;
     err = rpc_init_mempool();
-    if (err) {
-        unregister_rpc_pipefs();
-        goto out;
-    }
+    if (err)
+        goto out2;
+    err = rpcauth_init_module();
+    if (err)
+        goto out3;
 #ifdef RPC_DEBUG
     rpc_register_sysctl();
 #endif
@@ -47,7 +48,11 @@ init_sunrpc(void)
     cache_register(&unix_gid_cache);
     svc_init_xprt_sock();   /* svc sock transport */
     init_socket_xprt();     /* clnt sock transport */
-    rpcauth_init_module();
+    return 0;
+out3:
+    rpc_destroy_mempool();
+out2:
+    unregister_rpc_pipefs();
 out:
     return err;
 }
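Note: init_sunrpc() now unwinds cleanly when rpcauth_init_module(), which now allocates credential caches and can fail, returns an error, instead of leaking the mempool. The shape of the unwind, reduced to a generic sketch (the step names are made up):

static int __init example_init(void)
{
    int err;

    err = step_one();       /* hypothetical */
    if (err)
        goto out;
    err = step_two();       /* hypothetical */
    if (err)
        goto undo_one;
    return 0;
undo_one:
    undo_step_one();
out:
    return err;
}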
@@ -1032,6 +1032,8 @@ void xprt_release(struct rpc_task *task)
     spin_unlock_bh(&xprt->transport_lock);
     if (req->rq_buffer)
         xprt->ops->buf_free(req->rq_buffer);
+    if (req->rq_cred != NULL)
+        put_rpccred(req->rq_cred);
     task->tk_rqstp = NULL;
     if (req->rq_release_snd_buf)
         req->rq_release_snd_buf(req);
@@ -1129,6 +1131,7 @@ static void xprt_destroy(struct kref *kref)
     rpc_destroy_wait_queue(&xprt->sending);
     rpc_destroy_wait_queue(&xprt->resend);
     rpc_destroy_wait_queue(&xprt->backlog);
+    cancel_work_sync(&xprt->task_cleanup);
     /*
      * Tear down transport state and free the rpc_xprt
      */