RPC: Clean up RPC task structure
Shrink the RPC task structure. Instead of storing separate pointers for task->tk_exit and task->tk_release, put them in a structure. Also pass the user data pointer as a parameter instead of passing it via task->tk_calldata. This enables us to nest callbacks.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
commit 963d8fe533
parent abbcf28f23
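The pattern the patch introduces, as a minimal sketch: the rpc_call_ops definition and the rpc_call_async() signature are taken from the changes below, while my_request, my_done, my_release, my_ops and the surrounding variables are hypothetical illustration only, not part of the patch.

/* New callback table replacing task->tk_exit / task->tk_release. */
struct rpc_call_ops {
        void (*rpc_call_done)(struct rpc_task *, void *);
        void (*rpc_release)(void *);
};

/* Hypothetical caller-side state and callbacks. The per-call data now
 * arrives as an argument instead of being read from task->tk_calldata,
 * which is what makes nesting callbacks possible. */
struct my_request {
        int outcome;
};

static void my_done(struct rpc_task *task, void *calldata)
{
        struct my_request *req = calldata;

        req->outcome = task->tk_status;         /* consume the RPC result */
}

static void my_release(void *calldata)
{
        kfree(calldata);                        /* runs once the task is released */
}

static const struct rpc_call_ops my_ops = {
        .rpc_call_done  = my_done,
        .rpc_release    = my_release,
};

/* Kicking off an async call with the new rpc_call_async() signature:
 *
 *      status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &my_ops, req);
 */

The rpc_release callback takes over the role of the old tk_release hook: it is invoked with the caller data when the task is torn down, independently of how the call completed.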
@@ -26,11 +26,12 @@
 static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
 static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
 static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
-static void	nlmclnt_unlock_callback(struct rpc_task *);
-static void	nlmclnt_cancel_callback(struct rpc_task *);
 static int	nlm_stat_to_errno(u32 stat);
 static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
 
+static const struct rpc_call_ops nlmclnt_unlock_ops;
+static const struct rpc_call_ops nlmclnt_cancel_ops;
+
 /*
  * Cookie counter for NLM requests
  */
@@ -399,8 +400,7 @@ in_grace_period:
 /*
  * Generic NLM call, async version.
  */
-int
-nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
 {
 	struct nlm_host	*host = req->a_host;
 	struct rpc_clnt	*clnt;
@@ -419,13 +419,12 @@ nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
 	msg.rpc_proc = &clnt->cl_procinfo[proc];
 
 	/* bootstrap and kick off the async RPC call */
-	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
 
 	return status;
 }
 
-static int
-nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
 {
 	struct nlm_host	*host = req->a_host;
 	struct rpc_clnt	*clnt;
@@ -448,7 +447,7 @@ nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
 	/* Increment host refcount */
 	nlm_get_host(host);
 	/* bootstrap and kick off the async RPC call */
-	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
 	if (status < 0)
 		nlm_release_host(host);
 	return status;
@@ -664,7 +663,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 
 	if (req->a_flags & RPC_TASK_ASYNC) {
 		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
-					nlmclnt_unlock_callback);
+					&nlmclnt_unlock_ops);
 		/* Hrmf... Do the unlock early since locks_remove_posix()
 		 * really expects us to free the lock synchronously */
 		do_vfs_lock(fl);
@@ -692,10 +691,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	return -ENOLCK;
 }
 
-static void
-nlmclnt_unlock_callback(struct rpc_task *task)
+static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst	*req = data;
 	int		status = req->a_res.status;
 
 	if (RPC_ASSASSINATED(task))
@@ -722,6 +720,10 @@ die:
 	rpc_restart_call(task);
 }
 
+static const struct rpc_call_ops nlmclnt_unlock_ops = {
+	.rpc_call_done = nlmclnt_unlock_callback,
+};
+
 /*
  * Cancel a blocked lock request.
  * We always use an async RPC call for this in order not to hang a
@@ -750,8 +752,7 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
 
 	nlmclnt_setlockargs(req, fl);
 
-	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
-					nlmclnt_cancel_callback);
+	status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
 	if (status < 0) {
 		nlmclnt_release_lockargs(req);
 		kfree(req);
@@ -765,10 +766,9 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
 	return status;
 }
 
-static void
-nlmclnt_cancel_callback(struct rpc_task *task)
+static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst	*req = data;
 
 	if (RPC_ASSASSINATED(task))
 		goto die;
@@ -807,6 +807,10 @@ retry_cancel:
 	rpc_delay(task, 30 * HZ);
 }
 
+static const struct rpc_call_ops nlmclnt_cancel_ops = {
+	.rpc_call_done = nlmclnt_cancel_callback,
+};
+
 /*
  * Convert an NLM status code to a generic kernel errno
  */

@@ -22,7 +22,8 @@
 #define NLMDBG_FACILITY		NLMDBG_CLIENT
 
 static u32	nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *);
-static void	nlm4svc_callback_exit(struct rpc_task *);
+
+static const struct rpc_call_ops nlm4svc_callback_ops;
 
 /*
  * Obtain client and file from arguments
@@ -470,7 +471,6 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
 }
 
 
-
 /*
  * This is the generic lockd callback for async RPC calls
  */
@@ -494,7 +494,7 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
 	call->a_host = host;
 	memcpy(&call->a_args, resp, sizeof(*resp));
 
-	if (nlmsvc_async_call(call, proc, nlm4svc_callback_exit) < 0)
+	if (nlmsvc_async_call(call, proc, &nlm4svc_callback_ops) < 0)
 		goto error;
 
 	return rpc_success;
@@ -504,10 +504,9 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
 	return rpc_system_err;
 }
 
-static void
-nlm4svc_callback_exit(struct rpc_task *task)
+static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst	*call = data;
 
 	if (task->tk_status < 0) {
 		dprintk("lockd: %4d callback failed (errno = %d)\n",
@@ -517,6 +516,10 @@ nlm4svc_callback_exit(struct rpc_task *task)
 	kfree(call);
 }
 
+static const struct rpc_call_ops nlm4svc_callback_ops = {
+	.rpc_call_done = nlm4svc_callback_exit,
+};
+
 /*
  * NLM Server procedures.
  */

@@ -41,7 +41,8 @@
 
 static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
 static int  nlmsvc_remove_block(struct nlm_block *block);
-static void nlmsvc_grant_callback(struct rpc_task *task);
+
+static const struct rpc_call_ops nlmsvc_grant_ops;
 
 /*
  * The list of blocked locks to retry
@@ -562,7 +563,7 @@ callback:
 	/* Call the client */
 	nlm_get_host(block->b_call.a_host);
 	if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
-						nlmsvc_grant_callback) < 0)
+						&nlmsvc_grant_ops) < 0)
 		nlm_release_host(block->b_call.a_host);
 	up(&file->f_sema);
 }
@@ -575,10 +576,9 @@ callback:
  * chain once more in order to have it removed by lockd itself (which can
  * then sleep on the file semaphore without disrupting e.g. the nfs client).
  */
-static void
-nlmsvc_grant_callback(struct rpc_task *task)
+static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst		*call = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst		*call = data;
 	struct nlm_block	*block;
 	unsigned long		timeout;
 	struct sockaddr_in	*peer_addr = RPC_PEERADDR(task->tk_client);
@@ -614,6 +614,10 @@ nlmsvc_grant_callback(struct rpc_task *task)
 	nlm_release_host(call->a_host);
 }
 
+static const struct rpc_call_ops nlmsvc_grant_ops = {
+	.rpc_call_done = nlmsvc_grant_callback,
+};
+
 /*
  * We received a GRANT_RES callback. Try to find the corresponding
  * block.

@@ -23,7 +23,8 @@
 #define NLMDBG_FACILITY		NLMDBG_CLIENT
 
 static u32	nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *);
-static void	nlmsvc_callback_exit(struct rpc_task *);
+
+static const struct rpc_call_ops nlmsvc_callback_ops;
 
 #ifdef CONFIG_LOCKD_V4
 static u32
@@ -518,7 +519,7 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
 	call->a_host = host;
 	memcpy(&call->a_args, resp, sizeof(*resp));
 
-	if (nlmsvc_async_call(call, proc, nlmsvc_callback_exit) < 0)
+	if (nlmsvc_async_call(call, proc, &nlmsvc_callback_ops) < 0)
 		goto error;
 
 	return rpc_success;
@@ -528,10 +529,9 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
 	return rpc_system_err;
 }
 
-static void
-nlmsvc_callback_exit(struct rpc_task *task)
+static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
 {
-	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;
+	struct nlm_rqst	*call = data;
 
 	if (task->tk_status < 0) {
 		dprintk("lockd: %4d callback failed (errno = %d)\n",
@@ -541,6 +541,10 @@ nlmsvc_callback_exit(struct rpc_task *task)
 	kfree(call);
 }
 
+static const struct rpc_call_ops nlmsvc_callback_ops = {
+	.rpc_call_done = nlmsvc_callback_exit,
+};
+
 /*
  * NLM Server procedures.
 */

@@ -269,7 +269,6 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
 
 		data->task.tk_cookie = (unsigned long) inode;
 		data->task.tk_calldata = data;
-		data->task.tk_release = nfs_readdata_release;
 		data->complete = nfs_direct_read_result;
 
 		lock_kernel();

@@ -732,19 +732,23 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
 
 extern u32 *nfs3_decode_dirent(u32 *, struct nfs_entry *, int);
 
-static void
-nfs3_read_done(struct rpc_task *task)
+static void nfs3_read_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+	struct nfs_read_data *data = calldata;
 
 	if (nfs3_async_handle_jukebox(task))
 		return;
 	/* Call back common NFS readpage processing */
 	if (task->tk_status >= 0)
 		nfs_refresh_inode(data->inode, &data->fattr);
-	nfs_readpage_result(task);
+	nfs_readpage_result(task, calldata);
 }
 
+static const struct rpc_call_ops nfs3_read_ops = {
+	.rpc_call_done = nfs3_read_done,
+	.rpc_release = nfs_readdata_release,
+};
+
 static void
 nfs3_proc_read_setup(struct nfs_read_data *data)
 {
@@ -762,23 +766,26 @@ nfs3_proc_read_setup(struct nfs_read_data *data)
 	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
 
 	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs3_read_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_read_ops, data);
 	rpc_call_setup(task, &msg, 0);
 }
 
-static void
-nfs3_write_done(struct rpc_task *task)
+static void nfs3_write_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data;
+	struct nfs_write_data *data = calldata;
 
 	if (nfs3_async_handle_jukebox(task))
 		return;
-	data = (struct nfs_write_data *)task->tk_calldata;
 	if (task->tk_status >= 0)
 		nfs_post_op_update_inode(data->inode, data->res.fattr);
-	nfs_writeback_done(task);
+	nfs_writeback_done(task, calldata);
 }
 
+static const struct rpc_call_ops nfs3_write_ops = {
+	.rpc_call_done = nfs3_write_done,
+	.rpc_release = nfs_writedata_release,
+};
+
 static void
 nfs3_proc_write_setup(struct nfs_write_data *data, int how)
 {
@@ -806,23 +813,26 @@ nfs3_proc_write_setup(struct nfs_write_data *data, int how)
 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 
 	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs3_write_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_write_ops, data);
 	rpc_call_setup(task, &msg, 0);
 }
 
-static void
-nfs3_commit_done(struct rpc_task *task)
+static void nfs3_commit_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data;
+	struct nfs_write_data *data = calldata;
 
 	if (nfs3_async_handle_jukebox(task))
 		return;
-	data = (struct nfs_write_data *)task->tk_calldata;
 	if (task->tk_status >= 0)
 		nfs_post_op_update_inode(data->inode, data->res.fattr);
-	nfs_commit_done(task);
+	nfs_commit_done(task, calldata);
 }
 
+static const struct rpc_call_ops nfs3_commit_ops = {
+	.rpc_call_done = nfs3_commit_done,
+	.rpc_release = nfs_commit_release,
+};
+
 static void
 nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
 {
@@ -840,7 +850,7 @@ nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 
 	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs3_commit_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_commit_ops, data);
 	rpc_call_setup(task, &msg, 0);
 }
 

@@ -196,14 +196,12 @@ static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinf
 
 /* Helper for asynchronous RPC calls */
 static int nfs4_call_async(struct rpc_clnt *clnt, rpc_action tk_begin,
-		rpc_action tk_exit, void *calldata)
+		const struct rpc_call_ops *tk_ops, void *calldata)
 {
 	struct rpc_task *task;
 
-	if (!(task = rpc_new_task(clnt, tk_exit, RPC_TASK_ASYNC)))
+	if (!(task = rpc_new_task(clnt, RPC_TASK_ASYNC, tk_ops, calldata)))
 		return -ENOMEM;
 
-	task->tk_calldata = calldata;
 	task->tk_action = tk_begin;
 	rpc_execute(task);
 	return 0;
@@ -867,10 +865,10 @@ struct nfs4_closedata {
 	struct nfs_fattr fattr;
 };
 
-static void nfs4_free_closedata(struct nfs4_closedata *calldata)
+static void nfs4_free_closedata(void *data)
 {
-	struct nfs4_state *state = calldata->state;
-	struct nfs4_state_owner *sp = state->owner;
+	struct nfs4_closedata *calldata = data;
+	struct nfs4_state_owner *sp = calldata->state->owner;
 
 	nfs4_put_open_state(calldata->state);
 	nfs_free_seqid(calldata->arg.seqid);
@@ -878,9 +876,9 @@ static void nfs4_free_closedata(struct nfs4_closedata *calldata)
 	kfree(calldata);
 }
 
-static void nfs4_close_done(struct rpc_task *task)
+static void nfs4_close_done(struct rpc_task *task, void *data)
 {
-	struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
+	struct nfs4_closedata *calldata = data;
 	struct nfs4_state *state = calldata->state;
 	struct nfs_server *server = NFS_SERVER(calldata->inode);
 
@@ -904,7 +902,6 @@ static void nfs4_close_done(struct rpc_task *task)
 		}
 	}
 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
-	nfs4_free_closedata(calldata);
 }
 
 static void nfs4_close_begin(struct rpc_task *task)
@@ -918,10 +915,8 @@ static void nfs4_close_begin(struct rpc_task *task)
 		.rpc_cred = state->owner->so_cred,
 	};
 	int mode = 0, old_mode;
-	int status;
 
-	status = nfs_wait_on_sequence(calldata->arg.seqid, task);
-	if (status != 0)
+	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
 		return;
 	/* Recalculate the new open mode in case someone reopened the file
 	 * while we were waiting in line to be scheduled.
@@ -937,9 +932,8 @@ static void nfs4_close_begin(struct rpc_task *task)
 	spin_unlock(&calldata->inode->i_lock);
 	spin_unlock(&state->owner->so_lock);
 	if (mode == old_mode || test_bit(NFS_DELEGATED_STATE, &state->flags)) {
-		nfs4_free_closedata(calldata);
-		task->tk_exit = NULL;
-		rpc_exit(task, 0);
+		/* Note: exit _without_ calling nfs4_close_done */
+		task->tk_action = NULL;
 		return;
 	}
 	nfs_fattr_init(calldata->res.fattr);
@@ -949,6 +943,11 @@ static void nfs4_close_begin(struct rpc_task *task)
 	rpc_call_setup(task, &msg, 0);
 }
 
+static const struct rpc_call_ops nfs4_close_ops = {
+	.rpc_call_done = nfs4_close_done,
+	.rpc_release = nfs4_free_closedata,
+};
+
 /*
  * It is possible for data to be read/written from a mem-mapped file
  * after the sys_close call (which hits the vfs layer as a flush).
@@ -982,7 +981,7 @@ int nfs4_do_close(struct inode *inode, struct nfs4_state *state)
 	calldata->res.server = server;
 
 	status = nfs4_call_async(server->client, nfs4_close_begin,
-			nfs4_close_done, calldata);
+			&nfs4_close_ops, calldata);
 	if (status == 0)
 		goto out;
 
@@ -2125,10 +2124,9 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
 	return err;
 }
 
-static void
-nfs4_read_done(struct rpc_task *task)
+static void nfs4_read_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+	struct nfs_read_data *data = calldata;
 	struct inode *inode = data->inode;
 
 	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2138,9 +2136,14 @@ nfs4_read_done(struct rpc_task *task)
 	if (task->tk_status > 0)
 		renew_lease(NFS_SERVER(inode), data->timestamp);
 	/* Call back common NFS readpage processing */
-	nfs_readpage_result(task);
+	nfs_readpage_result(task, calldata);
 }
 
+static const struct rpc_call_ops nfs4_read_ops = {
+	.rpc_call_done = nfs4_read_done,
+	.rpc_release = nfs_readdata_release,
+};
+
 static void
 nfs4_proc_read_setup(struct nfs_read_data *data)
 {
@@ -2160,14 +2163,13 @@ nfs4_proc_read_setup(struct nfs_read_data *data)
 	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
 
 	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs4_read_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_read_ops, data);
 	rpc_call_setup(task, &msg, 0);
 }
 
-static void
-nfs4_write_done(struct rpc_task *task)
+static void nfs4_write_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_write_data *data = calldata;
 	struct inode *inode = data->inode;
 
 	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2179,9 +2181,14 @@ nfs4_write_done(struct rpc_task *task)
 		nfs_post_op_update_inode(inode, data->res.fattr);
 	}
 	/* Call back common NFS writeback processing */
-	nfs_writeback_done(task);
+	nfs_writeback_done(task, calldata);
 }
 
+static const struct rpc_call_ops nfs4_write_ops = {
+	.rpc_call_done = nfs4_write_done,
+	.rpc_release = nfs_writedata_release,
+};
+
 static void
 nfs4_proc_write_setup(struct nfs_write_data *data, int how)
 {
@@ -2214,14 +2221,13 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how)
 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 
 	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs4_write_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_write_ops, data);
 	rpc_call_setup(task, &msg, 0);
 }
 
-static void
-nfs4_commit_done(struct rpc_task *task)
+static void nfs4_commit_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_write_data *data = calldata;
 	struct inode *inode = data->inode;
 
 	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2231,9 +2237,14 @@ nfs4_commit_done(struct rpc_task *task)
 	if (task->tk_status >= 0)
 		nfs_post_op_update_inode(inode, data->res.fattr);
 	/* Call back common NFS writeback processing */
-	nfs_commit_done(task);
+	nfs_commit_done(task, calldata);
 }
 
+static const struct rpc_call_ops nfs4_commit_ops = {
+	.rpc_call_done = nfs4_commit_done,
+	.rpc_release = nfs_commit_release,
+};
+
 static void
 nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
 {
@@ -2255,7 +2266,7 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 
 	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs4_commit_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_commit_ops, data);
 	rpc_call_setup(task, &msg, 0);
 }
 
@@ -2263,11 +2274,10 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
  * standalone procedure for queueing an asynchronous RENEW.
  */
-static void
-renew_done(struct rpc_task *task)
+static void nfs4_renew_done(struct rpc_task *task, void *data)
 {
 	struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
-	unsigned long timestamp = (unsigned long)task->tk_calldata;
+	unsigned long timestamp = (unsigned long)data;
 
 	if (task->tk_status < 0) {
 		switch (task->tk_status) {
@@ -2284,6 +2294,10 @@ renew_done(struct rpc_task *task)
 	spin_unlock(&clp->cl_lock);
 }
 
+static const struct rpc_call_ops nfs4_renew_ops = {
+	.rpc_call_done = nfs4_renew_done,
+};
+
 int
 nfs4_proc_async_renew(struct nfs4_client *clp)
 {
@@ -2294,7 +2308,7 @@ nfs4_proc_async_renew(struct nfs4_client *clp)
 	};
 
 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
-			renew_done, (void *)jiffies);
+			&nfs4_renew_ops, (void *)jiffies);
 }
 
 int
@@ -2866,15 +2880,16 @@ static void nfs4_locku_release_calldata(struct nfs4_unlockdata *calldata)
 	}
 }
 
-static void nfs4_locku_complete(struct nfs4_unlockdata *calldata)
+static void nfs4_locku_complete(void *data)
 {
+	struct nfs4_unlockdata *calldata = data;
 	complete(&calldata->completion);
 	nfs4_locku_release_calldata(calldata);
 }
 
-static void nfs4_locku_done(struct rpc_task *task)
+static void nfs4_locku_done(struct rpc_task *task, void *data)
 {
-	struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
+	struct nfs4_unlockdata *calldata = data;
 
 	nfs_increment_lock_seqid(task->tk_status, calldata->luargs.seqid);
 	switch (task->tk_status) {
@@ -2890,10 +2905,8 @@ static void nfs4_locku_done(struct rpc_task *task)
 		default:
 			if (nfs4_async_handle_error(task, calldata->res.server) == -EAGAIN) {
 				rpc_restart_call(task);
-				return;
 			}
 	}
-	nfs4_locku_complete(calldata);
 }
 
 static void nfs4_locku_begin(struct rpc_task *task)
@@ -2911,14 +2924,18 @@ static void nfs4_locku_begin(struct rpc_task *task)
 	if (status != 0)
 		return;
 	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
-		nfs4_locku_complete(calldata);
-		task->tk_exit = NULL;
-		rpc_exit(task, 0);
+		/* Note: exit _without_ running nfs4_locku_done */
+		task->tk_action = NULL;
 		return;
 	}
 	rpc_call_setup(task, &msg, 0);
 }
 
+static const struct rpc_call_ops nfs4_locku_ops = {
+	.rpc_call_done = nfs4_locku_done,
+	.rpc_release = nfs4_locku_complete,
+};
+
 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
 {
 	struct nfs4_unlockdata *calldata;
@@ -2963,7 +2980,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
 	init_completion(&calldata->completion);
 
 	status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_locku_begin,
-			nfs4_locku_done, calldata);
+			&nfs4_locku_ops, calldata);
 	if (status == 0)
 		wait_for_completion_interruptible(&calldata->completion);
 	do_vfs_lock(request->fl_file, request);

@@ -547,10 +547,9 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
 
 extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int);
 
-static void
-nfs_read_done(struct rpc_task *task)
+static void nfs_read_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+	struct nfs_read_data *data = calldata;
 
 	if (task->tk_status >= 0) {
 		nfs_refresh_inode(data->inode, data->res.fattr);
@@ -560,9 +559,14 @@ nfs_read_done(struct rpc_task *task)
 		if (data->args.offset + data->args.count >= data->res.fattr->size)
 			data->res.eof = 1;
 	}
-	nfs_readpage_result(task);
+	nfs_readpage_result(task, calldata);
 }
 
+static const struct rpc_call_ops nfs_read_ops = {
+	.rpc_call_done = nfs_read_done,
+	.rpc_release = nfs_readdata_release,
+};
+
 static void
 nfs_proc_read_setup(struct nfs_read_data *data)
 {
@@ -580,20 +584,24 @@ nfs_proc_read_setup(struct nfs_read_data *data)
 	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
 
 	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs_read_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs_read_ops, data);
 	rpc_call_setup(task, &msg, 0);
 }
 
-static void
-nfs_write_done(struct rpc_task *task)
+static void nfs_write_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_write_data *data = calldata;
 
 	if (task->tk_status >= 0)
 		nfs_post_op_update_inode(data->inode, data->res.fattr);
-	nfs_writeback_done(task);
+	nfs_writeback_done(task, calldata);
 }
 
+static const struct rpc_call_ops nfs_write_ops = {
+	.rpc_call_done = nfs_write_done,
+	.rpc_release = nfs_writedata_release,
+};
+
 static void
 nfs_proc_write_setup(struct nfs_write_data *data, int how)
 {
@@ -614,7 +622,7 @@ nfs_proc_write_setup(struct nfs_write_data *data, int how)
 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 
 	/* Finalize the task. */
-	rpc_init_task(task, NFS_CLIENT(inode), nfs_write_done, flags);
+	rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs_write_ops, data);
 	rpc_call_setup(task, &msg, 0);
 }
 

@@ -42,9 +42,8 @@ mempool_t *nfs_rdata_mempool;
 
 #define MIN_POOL_READ	(32)
 
-void nfs_readdata_release(struct rpc_task *task)
+void nfs_readdata_release(void *data)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
 	nfs_readdata_free(data);
 }
 
@@ -220,9 +219,6 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
 	NFS_PROTO(inode)->read_setup(data);
 
 	data->task.tk_cookie = (unsigned long)inode;
-	data->task.tk_calldata = data;
-	/* Release requests */
-	data->task.tk_release = nfs_readdata_release;
 
 	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
 		data->task.tk_pid,
@@ -452,9 +448,9 @@ static void nfs_readpage_result_full(struct nfs_read_data *data, int status)
  * This is the callback from RPC telling us whether a reply was
  * received or some error occurred (timeout or socket shutdown).
  */
-void nfs_readpage_result(struct rpc_task *task)
+void nfs_readpage_result(struct rpc_task *task, void *calldata)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
+	struct nfs_read_data *data = calldata;
 	struct nfs_readargs *argp = &data->args;
 	struct nfs_readres *resp = &data->res;
 	int status = task->tk_status;

@@ -116,10 +116,9 @@ nfs_async_unlink_init(struct rpc_task *task)
  *
  * Do the directory attribute update.
  */
-static void
-nfs_async_unlink_done(struct rpc_task *task)
+static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_unlinkdata	*data = (struct nfs_unlinkdata *)task->tk_calldata;
+	struct nfs_unlinkdata	*data = calldata;
 	struct dentry		*dir = data->dir;
 	struct inode		*dir_i;
 
@@ -141,13 +140,17 @@ nfs_async_unlink_done(struct rpc_task *task)
  * We need to call nfs_put_unlinkdata as a 'tk_release' task since the
  * rpc_task would be freed too.
  */
-static void
-nfs_async_unlink_release(struct rpc_task *task)
+static void nfs_async_unlink_release(void *calldata)
 {
-	struct nfs_unlinkdata	*data = (struct nfs_unlinkdata *)task->tk_calldata;
+	struct nfs_unlinkdata	*data = calldata;
 	nfs_put_unlinkdata(data);
 }
 
+static const struct rpc_call_ops nfs_unlink_ops = {
+	.rpc_call_done = nfs_async_unlink_done,
+	.rpc_release = nfs_async_unlink_release,
+};
+
 /**
  * nfs_async_unlink - asynchronous unlinking of a file
  * @dentry: dentry to unlink
@@ -179,10 +182,8 @@ nfs_async_unlink(struct dentry *dentry)
 	data->count = 1;
 
 	task = &data->task;
-	rpc_init_task(task, clnt, nfs_async_unlink_done , RPC_TASK_ASYNC);
-	task->tk_calldata = data;
+	rpc_init_task(task, clnt, RPC_TASK_ASYNC, &nfs_unlink_ops, data);
 	task->tk_action = nfs_async_unlink_init;
-	task->tk_release = nfs_async_unlink_release;
 
 	spin_lock(&dentry->d_lock);
 	dentry->d_flags |= DCACHE_NFSFS_RENAMED;

@@ -104,9 +104,8 @@ static inline void nfs_commit_free(struct nfs_write_data *p)
 	mempool_free(p, nfs_commit_mempool);
 }
 
-static void nfs_writedata_release(struct rpc_task *task)
+void nfs_writedata_release(void *wdata)
 {
-	struct nfs_write_data	*wdata = (struct nfs_write_data *)task->tk_calldata;
 	nfs_writedata_free(wdata);
 }
 
@@ -871,9 +870,6 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
 
 	data->task.tk_priority = flush_task_priority(how);
 	data->task.tk_cookie = (unsigned long)inode;
-	data->task.tk_calldata = data;
-	/* Release requests */
-	data->task.tk_release = nfs_writedata_release;
 
 	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
 		data->task.tk_pid,
@@ -1131,9 +1127,9 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
 /*
  * This function is called when the WRITE call is complete.
  */
-void nfs_writeback_done(struct rpc_task *task)
+void nfs_writeback_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data	*data = (struct nfs_write_data *) task->tk_calldata;
+	struct nfs_write_data	*data = calldata;
 	struct nfs_writeargs	*argp = &data->args;
 	struct nfs_writeres	*resp = &data->res;
 
@@ -1200,9 +1196,8 @@ void nfs_writeback_done(struct rpc_task *task)
 
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-static void nfs_commit_release(struct rpc_task *task)
+void nfs_commit_release(void *wdata)
 {
-	struct nfs_write_data	*wdata = (struct nfs_write_data *)task->tk_calldata;
 	nfs_commit_free(wdata);
 }
 
@@ -1238,9 +1233,6 @@ static void nfs_commit_rpcsetup(struct list_head *head,
 
 	data->task.tk_priority = flush_task_priority(how);
 	data->task.tk_cookie = (unsigned long)inode;
-	data->task.tk_calldata = data;
-	/* Release requests */
-	data->task.tk_release = nfs_commit_release;
 
 	dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
 }
@@ -1277,10 +1269,9 @@ nfs_commit_list(struct list_head *head, int how)
 /*
  * COMMIT call returned
  */
-void
-nfs_commit_done(struct rpc_task *task)
+void nfs_commit_done(struct rpc_task *task, void *calldata)
 {
-	struct nfs_write_data	*data = (struct nfs_write_data *)task->tk_calldata;
+	struct nfs_write_data	*data = calldata;
 	struct nfs_page		*req;
 	int res = 0;
 

@@ -53,7 +53,7 @@
 #define NFSPROC4_CB_COMPOUND 1
 
 /* declarations */
-static void nfs4_cb_null(struct rpc_task *task);
+static const struct rpc_call_ops nfs4_cb_null_ops;
 
 /* Index of predefined Linux callback client operations */
 
@@ -447,7 +447,7 @@ nfsd4_probe_callback(struct nfs4_client *clp)
 	msg.rpc_cred = nfsd4_lookupcred(clp,0);
 	if (IS_ERR(msg.rpc_cred))
 		goto out_rpciod;
-	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, nfs4_cb_null, NULL);
+	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL);
 	put_rpccred(msg.rpc_cred);
 
 	if (status != 0) {
@@ -469,7 +469,7 @@ out_err:
 }
 
 static void
-nfs4_cb_null(struct rpc_task *task)
+nfs4_cb_null(struct rpc_task *task, void *dummy)
 {
 	struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
 	struct nfs4_callback *cb = &clp->cl_callback;
@@ -488,6 +488,10 @@ out:
 	put_nfs4_client(clp);
 }
 
+static const struct rpc_call_ops nfs4_cb_null_ops = {
+	.rpc_call_done = nfs4_cb_null,
+};
+
 /*
  * called with dp->dl_count inc'ed.
  * nfs4_lock_state() may or may not have been called.

@@ -172,7 +172,7 @@ extern struct nlm_host *nlm_find_client(void);
 /*
  * Server-side lock handling
  */
-int		  nlmsvc_async_call(struct nlm_rqst *, u32, rpc_action);
+int		  nlmsvc_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
 u32		  nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
 					struct nlm_lock *, int, struct nlm_cookie *);
 u32		  nlmsvc_unlock(struct nlm_file *, struct nlm_lock *);

@@ -406,10 +406,12 @@ extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
 extern int  nfs_writepages(struct address_space *, struct writeback_control *);
 extern int  nfs_flush_incompatible(struct file *file, struct page *page);
 extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
-extern void nfs_writeback_done(struct rpc_task *task);
+extern void nfs_writeback_done(struct rpc_task *task, void *data);
+extern void nfs_writedata_release(void *data);
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-extern void nfs_commit_done(struct rpc_task *);
+extern void nfs_commit_done(struct rpc_task *, void *data);
+extern void nfs_commit_release(void *data);
 #endif
 
 /*
@@ -481,7 +483,9 @@ static inline void nfs_writedata_free(struct nfs_write_data *p)
 extern int  nfs_readpage(struct file *, struct page *);
 extern int  nfs_readpages(struct file *, struct address_space *,
 		struct list_head *, unsigned);
-extern void nfs_readpage_result(struct rpc_task *);
+extern void nfs_readpage_result(struct rpc_task *, void *);
+extern void nfs_readdata_release(void *data);
+
 
 /*
  * Allocate and free nfs_read_data structures
@@ -501,8 +505,6 @@ static inline void nfs_readdata_free(struct nfs_read_data *p)
 	mempool_free(p, nfs_rdata_mempool);
 }
 
-extern void nfs_readdata_release(struct rpc_task *task);
-
 /*
  * linux/fs/nfs3proc.c
 */

@@ -126,7 +126,8 @@ int rpc_register(u32, u32, int, unsigned short, int *);
 void		rpc_call_setup(struct rpc_task *, struct rpc_message *, int);
 
 int		rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg,
-			       int flags, rpc_action callback, void *clntdata);
+			       int flags, const struct rpc_call_ops *tk_ops,
+			       void *calldata);
 int		rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
 			      int flags);
 void		rpc_restart_call(struct rpc_task *);

@@ -27,6 +27,7 @@ struct rpc_message {
 	struct rpc_cred *	rpc_cred;	/* Credentials */
 };
 
+struct rpc_call_ops;
 struct rpc_wait_queue;
 struct rpc_wait {
 	struct list_head	list;		/* wait queue links */
@@ -61,13 +62,12 @@ struct rpc_task {
 	 * timeout_fn	to be executed by timer bottom half
 	 * callback	to be executed after waking up
 	 * action	next procedure for async tasks
-	 * exit		exit async task and report to caller
+	 * tk_ops	caller callbacks
 	 */
 	void			(*tk_timeout_fn)(struct rpc_task *);
 	void			(*tk_callback)(struct rpc_task *);
 	void			(*tk_action)(struct rpc_task *);
-	void			(*tk_exit)(struct rpc_task *);
-	void			(*tk_release)(struct rpc_task *);
+	const struct rpc_call_ops *tk_ops;
 	void *			tk_calldata;
 
 	/*
@@ -111,6 +111,12 @@ struct rpc_task {
 
 typedef void			(*rpc_action)(struct rpc_task *);
 
+struct rpc_call_ops {
+	void (*rpc_call_done)(struct rpc_task *, void *);
+	void (*rpc_release)(void *);
+};
+
+
 /*
  * RPC task flags
  */
@@ -228,10 +234,12 @@ struct rpc_wait_queue {
 /*
  * Function prototypes
  */
-struct rpc_task *rpc_new_task(struct rpc_clnt *, rpc_action, int flags);
+struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
+				const struct rpc_call_ops *ops, void *data);
 struct rpc_task *rpc_new_child(struct rpc_clnt *, struct rpc_task *parent);
-void		rpc_init_task(struct rpc_task *, struct rpc_clnt *,
-				rpc_action exitfunc, int flags);
+void		rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
+				int flags, const struct rpc_call_ops *ops,
+				void *data);
 void		rpc_release_task(struct rpc_task *);
 void		rpc_exit_task(struct rpc_task *);
 void		rpc_killall_tasks(struct rpc_clnt *);

@@ -374,10 +374,14 @@ out:
  * Default callback for async RPC calls
  */
 static void
-rpc_default_callback(struct rpc_task *task)
+rpc_default_callback(struct rpc_task *task, void *data)
 {
 }
 
+static const struct rpc_call_ops rpc_default_ops = {
+	.rpc_call_done = rpc_default_callback,
+};
+
 /*
  * Export the signal mask handling for synchronous code that
  * sleeps on RPC calls
@@ -432,7 +436,7 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 	BUG_ON(flags & RPC_TASK_ASYNC);
 
 	status = -ENOMEM;
-	task = rpc_new_task(clnt, NULL, flags);
+	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
 	if (task == NULL)
 		goto out;
 
@@ -459,7 +463,7 @@ out:
 */
 int
 rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
-	       rpc_action callback, void *data)
+	       const struct rpc_call_ops *tk_ops, void *data)
 {
 	struct rpc_task	*task;
 	sigset_t	oldset;
@@ -472,12 +476,9 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
 	flags |= RPC_TASK_ASYNC;
 
 	/* Create/initialize a new RPC task */
-	if (!callback)
-		callback = rpc_default_callback;
 	status = -ENOMEM;
-	if (!(task = rpc_new_task(clnt, callback, flags)))
+	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
 		goto out;
-	task->tk_calldata = data;
 
 	/* Mask signals on GSS_AUTH upcalls */
 	rpc_task_sigmask(task, &oldset);

@@ -555,13 +555,13 @@ __rpc_atrun(struct rpc_task *task)
 }
 
 /*
- * Helper that calls task->tk_exit if it exists
+ * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
 void rpc_exit_task(struct rpc_task *task)
 {
 	task->tk_action = NULL;
-	if (task->tk_exit != NULL) {
-		task->tk_exit(task);
+	if (task->tk_ops->rpc_call_done != NULL) {
+		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 		if (task->tk_action != NULL) {
 			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
@@ -747,7 +747,7 @@ rpc_free(struct rpc_task *task)
 /*
  * Creation and deletion of RPC task structures
 */
-void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action callback, int flags)
+void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
 {
 	memset(task, 0, sizeof(*task));
 	init_timer(&task->tk_timer);
@@ -755,7 +755,8 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call
 	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
 	task->tk_client = clnt;
 	task->tk_flags = flags;
-	task->tk_exit = callback;
+	task->tk_ops = tk_ops;
+	task->tk_calldata = calldata;
 
 	/* Initialize retry counters */
 	task->tk_garb_retry = 2;
@@ -784,6 +785,8 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call
 	list_add_tail(&task->tk_task, &all_tasks);
 	spin_unlock(&rpc_sched_lock);
 
+	BUG_ON(task->tk_ops == NULL);
+
 	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
 				current->pid);
 }
@@ -794,8 +797,7 @@ rpc_alloc_task(void)
 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
-static void
-rpc_default_free_task(struct rpc_task *task)
+static void rpc_free_task(struct rpc_task *task)
 {
 	dprintk("RPC: %4d freeing task\n", task->tk_pid);
 	mempool_free(task, rpc_task_mempool);
@@ -806,8 +808,7 @@ rpc_default_free_task(struct rpc_task *task)
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
-struct rpc_task *
-rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
+struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
 {
 	struct rpc_task	*task;
 
@@ -815,10 +816,7 @@ rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
 	if (!task)
 		goto cleanup;
 
-	rpc_init_task(task, clnt, callback, flags);
-
-	/* Replace tk_release */
-	task->tk_release = rpc_default_free_task;
+	rpc_init_task(task, clnt, flags, tk_ops, calldata);
 
 	dprintk("RPC: %4d allocated task\n", task->tk_pid);
 	task->tk_flags |= RPC_TASK_DYNAMIC;
@@ -838,6 +836,8 @@ cleanup:
 
 void rpc_release_task(struct rpc_task *task)
 {
+	const struct rpc_call_ops *tk_ops = task->tk_ops;
+	void *calldata = task->tk_calldata;
 	dprintk("RPC: %4d release task\n", task->tk_pid);
 
 #ifdef RPC_DEBUG
@@ -869,8 +869,10 @@ void rpc_release_task(struct rpc_task *task)
 #ifdef RPC_DEBUG
 	task->tk_magic = 0;
 #endif
-	if (task->tk_release)
-		task->tk_release(task);
+	if (task->tk_flags & RPC_TASK_DYNAMIC)
+		rpc_free_task(task);
+	if (tk_ops->rpc_release)
+		tk_ops->rpc_release(calldata);
 }
 
 /**
@@ -883,12 +885,11 @@ void rpc_release_task(struct rpc_task *task)
 *
 * Caller must hold childq.lock
 */
-static inline struct rpc_task *rpc_find_parent(struct rpc_task *child)
+static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
 {
-	struct rpc_task	*task, *parent;
+	struct rpc_task	*task;
 	struct list_head *le;
 
-	parent = (struct rpc_task *) child->tk_calldata;
 	task_for_each(task, le, &childq.tasks[0])
 		if (task == parent)
 			return parent;
@@ -896,18 +897,22 @@ static inline struct rpc_task *rpc_find_parent(struct rpc_task *child)
 	return NULL;
 }
 
-static void rpc_child_exit(struct rpc_task *child)
+static void rpc_child_exit(struct rpc_task *child, void *calldata)
 {
 	struct rpc_task	*parent;
 
 	spin_lock_bh(&childq.lock);
-	if ((parent = rpc_find_parent(child)) != NULL) {
+	if ((parent = rpc_find_parent(child, calldata)) != NULL) {
 		parent->tk_status = child->tk_status;
 		__rpc_wake_up_task(parent);
 	}
 	spin_unlock_bh(&childq.lock);
 }
 
+static const struct rpc_call_ops rpc_child_ops = {
+	.rpc_call_done = rpc_child_exit,
+};
+
 /*
 * Note: rpc_new_task releases the client after a failure.
 */
@@ -916,11 +921,9 @@ rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
 {
 	struct rpc_task	*task;
 
-	task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
+	task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
 	if (!task)
 		goto fail;
-	task->tk_exit = rpc_child_exit;
-	task->tk_calldata = parent;
 	return task;
 
 fail:
@@ -1056,7 +1059,7 @@ void rpc_show_tasks(void)
 		return;
 	}
 	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
-		"-rpcwait -action- --exit--\n");
+		"-rpcwait -action- ---ops--\n");
 	alltask_for_each(t, le, &all_tasks) {
 		const char *rpc_waitq = "none";
 
@@ -1071,7 +1074,7 @@ void rpc_show_tasks(void)
 			(t->tk_client ? t->tk_client->cl_prog : 0),
 			t->tk_rqstp, t->tk_timeout,
 			rpc_waitq,
-			t->tk_action, t->tk_exit);
+			t->tk_action, t->tk_ops);
 	}
 	spin_unlock(&rpc_sched_lock);
 }