2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* fs/nfs/nfs4proc.c
|
|
|
|
*
|
|
|
|
* Client-side procedure declarations for NFSv4.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2002 The Regents of the University of Michigan.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Kendrick Smith <kmsmith@umich.edu>
|
|
|
|
* Andy Adamson <andros@umich.edu>
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. Neither the name of the University nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
|
|
|
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
|
|
|
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
|
|
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
|
|
|
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
|
|
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
|
|
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
|
|
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/string.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/sunrpc/clnt.h>
|
2011-03-25 01:12:31 +08:00
|
|
|
#include <linux/sunrpc/gss_api.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/nfs.h>
|
|
|
|
#include <linux/nfs4.h>
|
|
|
|
#include <linux/nfs_fs.h>
|
|
|
|
#include <linux/nfs_page.h>
|
|
|
|
#include <linux/namei.h>
|
2005-10-19 05:20:17 +08:00
|
|
|
#include <linux/mount.h>
|
2009-04-01 21:22:29 +08:00
|
|
|
#include <linux/module.h>
|
2009-04-01 21:23:18 +08:00
|
|
|
#include <linux/sunrpc/bc_xprt.h>
|
2010-12-09 19:35:25 +08:00
|
|
|
#include <linux/xattr.h>
|
2011-01-26 08:15:32 +08:00
|
|
|
#include <linux/utsname.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-06-23 01:16:21 +08:00
|
|
|
#include "nfs4_fs.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
#include "delegation.h"
|
2008-02-20 09:04:23 +08:00
|
|
|
#include "internal.h"
|
2006-03-21 02:44:14 +08:00
|
|
|
#include "iostat.h"
|
2009-04-01 21:22:31 +08:00
|
|
|
#include "callback.h"
|
2010-10-20 12:18:03 +08:00
|
|
|
#include "pnfs.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#define NFSDBG_FACILITY NFSDBG_PROC
|
|
|
|
|
2006-09-15 20:30:46 +08:00
|
|
|
#define NFS4_POLL_RETRY_MIN (HZ/10)
|
2005-04-17 06:20:36 +08:00
|
|
|
#define NFS4_POLL_RETRY_MAX (15*HZ)
|
|
|
|
|
2009-08-10 03:06:19 +08:00
|
|
|
#define NFS4_MAX_LOOP_ON_RECOVER (10)
|
|
|
|
|
2006-01-03 16:55:12 +08:00
|
|
|
struct nfs4_opendata;
|
2006-01-03 16:55:15 +08:00
|
|
|
static int _nfs4_proc_open(struct nfs4_opendata *data);
|
2009-12-15 13:27:57 +08:00
|
|
|
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
|
2005-04-17 06:20:36 +08:00
|
|
|
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
|
2008-12-24 04:21:46 +08:00
|
|
|
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
|
2011-03-25 01:12:24 +08:00
|
|
|
static int _nfs4_proc_lookup(struct rpc_clnt *client, struct inode *dir,
|
|
|
|
const struct qstr *name, struct nfs_fh *fhandle,
|
|
|
|
struct nfs_fattr *fattr);
|
2007-07-18 09:52:41 +08:00
|
|
|
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
|
2010-04-17 04:22:51 +08:00
|
|
|
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
|
|
|
|
struct nfs_fattr *fattr, struct iattr *sattr,
|
|
|
|
struct nfs4_state *state);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Prevent leaks of NFSv4 errors into userland */
|
2008-12-31 05:35:55 +08:00
|
|
|
static int nfs4_map_errors(int err)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-10-24 02:46:42 +08:00
|
|
|
if (err >= -1000)
|
|
|
|
return err;
|
|
|
|
switch (err) {
|
|
|
|
case -NFS4ERR_RESOURCE:
|
|
|
|
return -EREMOTEIO;
|
2011-03-25 01:12:30 +08:00
|
|
|
case -NFS4ERR_WRONGSEC:
|
|
|
|
return -EPERM;
|
2011-02-23 07:44:31 +08:00
|
|
|
case -NFS4ERR_BADOWNER:
|
|
|
|
case -NFS4ERR_BADNAME:
|
|
|
|
return -EINVAL;
|
2009-10-24 02:46:42 +08:00
|
|
|
default:
|
2005-04-17 06:20:36 +08:00
|
|
|
dprintk("%s could not handle NFSv4 error %d\n",
|
2008-05-03 04:42:44 +08:00
|
|
|
__func__, -err);
|
2009-10-24 02:46:42 +08:00
|
|
|
break;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2009-10-24 02:46:42 +08:00
|
|
|
return -EIO;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is our standard bitmap for GETATTR requests.
|
|
|
|
*/
|
|
|
|
const u32 nfs4_fattr_bitmap[2] = {
	/* word 0: basic attributes */
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	/* word 1: ownership, device and timestamp attributes */
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
};
|
|
|
|
|
|
|
|
/* Attribute bitmap used for STATFS-style filesystem usage queries. */
const u32 nfs4_statfs_bitmap[2] = {
	/* word 0: file-count statistics */
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	/* word 1: byte-space statistics */
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};
|
|
|
|
|
2005-06-23 01:16:21 +08:00
|
|
|
/* Attribute bitmap for PATHCONF requests; only word 0 is needed. */
const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};
|
|
|
|
|
|
|
|
/* Attribute bitmap for FSINFO: transfer limits, lease time and pNFS layout
 * types. */
const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES
};
|
|
|
|
|
2006-06-09 21:34:25 +08:00
|
|
|
/* Attribute bitmap used when querying fs_locations (referrals/migration).
 * Same as nfs4_fattr_bitmap plus FS_LOCATIONS and MOUNTED_ON_FILEID. */
const u32 nfs4_fs_locations_bitmap[2] = {
	/* word 0 */
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	/* word 1 */
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};
|
|
|
|
|
2006-10-20 14:28:51 +08:00
|
|
|
/*
 * Set up the arguments for a READDIR request.  For a normal continuation
 * (cookie > 2) the cookie and verifier are passed through unchanged.  The
 * special cookies 0-2 correspond to the fake '.' and '..' entries, which
 * are XDR-encoded directly into the first page here (see comment below).
 */
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	/* The fake entries encoded below need roughly 80 bytes of space. */
	BUG_ON(readdir->count < 80);
	if (cookie > 2) {
		/* Regular readdir continuation: pass through as-is. */
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	/* Cookies 0-2 all restart the server-side listing from scratch. */
	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages, KM_USER0);

	if (cookie == 0) {
		/* XDR-encode the fake '.' entry. */
		*p++ = xdr_one;                                  /* next */
		*p++ = xdr_zero;                   /* cookie, first word */
		*p++ = xdr_one;                   /* cookie, second word */
		*p++ = xdr_one;                             /* entry len */
		memcpy(p, ".\0\0\0", 4);                        /* entry */
		p++;
		*p++ = xdr_one;                         /* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
		*p++ = htonl(8);              /* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	/* XDR-encode the fake '..' entry (always emitted for cookie <= 1). */
	*p++ = xdr_one;                                  /* next */
	*p++ = xdr_zero;                   /* cookie, first word */
	*p++ = xdr_two;                   /* cookie, second word */
	*p++ = xdr_two;                             /* entry len */
	memcpy(p, "..\0\0", 4);                         /* entry */
	p++;
	*p++ = xdr_one;                         /* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
	*p++ = htonl(8);              /* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	/* Server-provided data will be appended after the fake entries. */
	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start, KM_USER0);
}
|
|
|
|
|
2008-12-24 04:21:44 +08:00
|
|
|
static int nfs4_wait_clnt_recover(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
int res;
|
|
|
|
|
|
|
|
might_sleep();
|
|
|
|
|
2008-12-24 04:21:48 +08:00
|
|
|
res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
|
2009-03-12 02:10:30 +08:00
|
|
|
nfs_wait_bit_killable, TASK_KILLABLE);
|
2008-12-24 04:21:44 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
|
|
|
|
{
|
|
|
|
int res = 0;
|
|
|
|
|
|
|
|
might_sleep();
|
|
|
|
|
|
|
|
if (*timeout <= 0)
|
|
|
|
*timeout = NFS4_POLL_RETRY_MIN;
|
|
|
|
if (*timeout > NFS4_POLL_RETRY_MAX)
|
|
|
|
*timeout = NFS4_POLL_RETRY_MAX;
|
|
|
|
schedule_timeout_killable(*timeout);
|
|
|
|
if (fatal_signal_pending(current))
|
|
|
|
res = -ERESTARTSYS;
|
|
|
|
*timeout <<= 1;
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This is the error handling routine for processes that are allowed
|
|
|
|
* to sleep.
|
|
|
|
*/
|
2011-02-23 07:44:32 +08:00
|
|
|
/*
 * Standard error handler for processes that are allowed to sleep.
 *
 * Classifies @errorcode and either triggers the appropriate recovery
 * (stateid, lease or session), delays and requests a retry, or maps the
 * error for return to userland.  On return, exception->retry tells the
 * caller whether to re-issue the operation.
 *
 * NOTE: several of the case labels below fall through deliberately.
 */
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	int ret = errorcode;

	exception->retry = 0;
	switch(errorcode) {
		case 0:
			return 0;
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OPENMODE:
			/* Stateid errors: recover the individual state, if any. */
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_EXPIRED:
			/* Lease-level errors: recover the whole client lease. */
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			/* NFSv4.1 session errors: reset the session and retry. */
			dprintk("%s ERROR: %d Reset session\n", __func__,
				errorcode);
			nfs4_schedule_session_recovery(clp->cl_session);
			exception->retry = 1;
			break;
#endif /* defined(CONFIG_NFS_V4_1) */
		case -NFS4ERR_FILE_OPEN:
			if (exception->timeout > HZ) {
				/* We have retried a decent amount, time to
				 * fail
				 */
				ret = -EBUSY;
				break;
			}
			/* fall through: retry after a delay, like GRACE/DELAY */
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
		case -EKEYEXPIRED:
			ret = nfs4_delay(server->client, &exception->timeout);
			if (ret != 0)
				break;
			/* fall through: request a retry */
		case -NFS4ERR_OLD_STATEID:
			exception->retry = 1;
			break;
		case -NFS4ERR_BADOWNER:
			/* The following works around a Linux server bug! */
		case -NFS4ERR_BADNAME:
			/* Server rejected numeric uid/gid strings: re-enable
			 * the idmapper and retry once. */
			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
				exception->retry = 1;
				printk(KERN_WARNING "NFS: v4 server %s "
						"does not accept raw "
						"uid/gids. "
						"Reenabling the idmapper.\n",
						server->nfs_client->cl_hostname);
			}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	/* Wait for the scheduled recovery to complete before retrying. */
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}
|
|
|
|
|
|
|
|
|
2010-08-01 02:29:06 +08:00
|
|
|
/*
 * Record @timestamp as the time of the most recent lease renewal for
 * @clp.  cl_last_renewal only ever moves forward; cl_lock serializes
 * updates against concurrent renewers.
 */
static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal,timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}
|
|
|
|
|
2010-08-01 02:29:06 +08:00
|
|
|
/* Convenience wrapper: renew the lease of the nfs_client owning @server. */
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}
|
|
|
|
|
2009-04-01 21:22:03 +08:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
|
2009-04-01 21:22:16 +08:00
|
|
|
/*
|
|
|
|
* nfs4_free_slot - free a slot and efficiently update slot table.
|
|
|
|
*
|
|
|
|
* freeing a slot is trivially done by clearing its respective bit
|
|
|
|
* in the bitmap.
|
|
|
|
* If the freed slotid equals highest_used_slotid we want to update it
|
|
|
|
* so that the server would be able to size down the slot table if needed,
|
|
|
|
* otherwise we know that the highest_used_slotid is still in use.
|
|
|
|
* When updating highest_used_slotid there may be "holes" in the bitmap
|
|
|
|
* so we need to scan down from highest_used_slotid to 0 looking for the now
|
|
|
|
* highest slotid in use.
|
|
|
|
* If none found, highest_used_slotid is set to -1.
|
2009-12-06 08:32:19 +08:00
|
|
|
*
|
|
|
|
* Must be called while holding tbl->slot_tbl_lock
|
2009-04-01 21:22:16 +08:00
|
|
|
*/
|
|
|
|
static void
|
2010-09-24 21:17:01 +08:00
|
|
|
nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
|
2009-04-01 21:22:16 +08:00
|
|
|
{
|
2010-09-24 21:17:01 +08:00
|
|
|
int free_slotid = free_slot - tbl->slots;
|
2009-04-01 21:22:16 +08:00
|
|
|
int slotid = free_slotid;
|
|
|
|
|
2010-09-24 21:17:01 +08:00
|
|
|
BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
|
2009-04-01 21:22:16 +08:00
|
|
|
/* clear used bit in bitmap */
|
|
|
|
__clear_bit(slotid, tbl->used_slots);
|
|
|
|
|
|
|
|
/* update highest_used_slotid when it is freed */
|
|
|
|
if (slotid == tbl->highest_used_slotid) {
|
|
|
|
slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
|
2009-12-06 08:32:19 +08:00
|
|
|
if (slotid < tbl->max_slots)
|
2009-04-01 21:22:16 +08:00
|
|
|
tbl->highest_used_slotid = slotid;
|
|
|
|
else
|
|
|
|
tbl->highest_used_slotid = -1;
|
|
|
|
}
|
|
|
|
dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
|
|
|
|
free_slotid, tbl->highest_used_slotid);
|
|
|
|
}
|
|
|
|
|
2009-12-09 17:50:11 +08:00
|
|
|
/*
|
2011-01-06 10:04:34 +08:00
|
|
|
* Signal state manager thread if session fore channel is drained
|
2009-12-09 17:50:11 +08:00
|
|
|
*/
|
2011-01-06 10:04:34 +08:00
|
|
|
/*
 * Signal state manager thread if session fore channel is drained.
 *
 * If the session is not draining, simply wake the next waiter (with
 * privileged priority so state-manager work runs first).  Otherwise,
 * once the last fore-channel slot is freed, complete the drain so the
 * state manager can proceed.
 *
 * NOTE(review): the visible caller (nfs41_sequence_free_slot) invokes
 * this while holding slot_tbl_lock — presumably that is required here
 * too; confirm against other callers.
 */
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
	struct rpc_task *task;

	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
		if (task)
			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
		return;
	}

	/* Still draining: bail out until the last slot is freed. */
	if (ses->fc_slot_table.highest_used_slotid != -1)
		return;

	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
	complete(&ses->fc_slot_table.complete);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Signal state manager thread if session back channel is drained
|
|
|
|
*/
|
|
|
|
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
|
|
|
|
{
|
|
|
|
if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
|
|
|
|
ses->bc_slot_table.highest_used_slotid != -1)
|
|
|
|
return;
|
|
|
|
dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
|
|
|
|
complete(&ses->bc_slot_table.complete);
|
2009-12-09 17:50:11 +08:00
|
|
|
}
|
|
|
|
|
2010-06-16 21:52:25 +08:00
|
|
|
/*
 * Release the fore-channel slot recorded in @res (if any) and notify
 * the fore-channel drain logic.  Clears res->sr_slot so the slot cannot
 * be freed twice.
 */
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot_table *tbl;

	tbl = &res->sr_session->fc_slot_table;
	if (!res->sr_slot) {
		/* just wake up the next guy waiting since
		 * we may have not consumed a slot after all */
		dprintk("%s: No slot\n", __func__);
		return;
	}

	/* slot_tbl_lock protects the bitmap and the drain check. */
	spin_lock(&tbl->slot_tbl_lock);
	nfs4_free_slot(tbl, res->sr_slot);
	nfs4_check_drain_fc_complete(res->sr_session);
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
}
|
|
|
|
|
2010-08-01 02:29:06 +08:00
|
|
|
/*
 * Process the result of the SEQUENCE operation attached to a completed
 * NFSv4.1 RPC: renew the lease and bump the slot sequence number on
 * success, retry after a delay on NFS4ERR_DELAY, and free the slot in
 * all non-retry paths.
 *
 * Returns 1 when the caller may continue processing the reply, 0 when
 * the RPC has been scheduled for a retry.
 */
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation..
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* -ERESTARTSYS can result in skipping nfs41_sequence_setup */
	if (!res->sr_slot)
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	/* Re-queue the RPC with a delay; the slot stays allocated. */
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}
|
|
|
|
|
2010-08-01 02:29:06 +08:00
|
|
|
static int nfs4_sequence_done(struct rpc_task *task,
|
|
|
|
struct nfs4_sequence_res *res)
|
2010-06-16 21:52:26 +08:00
|
|
|
{
|
2010-08-01 02:29:06 +08:00
|
|
|
if (res->sr_session == NULL)
|
|
|
|
return 1;
|
|
|
|
return nfs41_sequence_done(task, res);
|
2010-06-16 21:52:26 +08:00
|
|
|
}
|
|
|
|
|
2009-04-01 21:22:14 +08:00
|
|
|
/*
|
|
|
|
* nfs4_find_slot - efficiently look for a free slot
|
|
|
|
*
|
|
|
|
* nfs4_find_slot looks for an unset bit in the used_slots bitmap.
|
|
|
|
* If found, we mark the slot as used, update the highest_used_slotid,
|
|
|
|
* and respectively set up the sequence operation args.
|
|
|
|
* The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
|
2009-04-01 21:22:15 +08:00
|
|
|
*
|
|
|
|
* Note: must be called with under the slot_tbl_lock.
|
2009-04-01 21:22:14 +08:00
|
|
|
*/
|
|
|
|
/*
 * Find and claim a free slot in @tbl: locate the first clear bit in the
 * used-slot bitmap, mark it used, and raise highest_used_slotid if
 * needed.  Returns the claimed slot id, or NFS4_MAX_SLOT_TABLE if the
 * table is full.  Must be called with slot_tbl_lock held.
 */
static u8
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
	int slotid;
	u8 ret_id = NFS4_MAX_SLOT_TABLE;
	/* Sentinel must survive the narrowing to u8. */
	BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);

	dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slots);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots)
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid)
		tbl->highest_used_slotid = slotid;
	ret_id = slotid;
out:
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
	return ret_id;
}
|
|
|
|
|
2011-03-01 09:34:19 +08:00
|
|
|
/*
 * nfs41_setup_sequence - allocate a session slot and fill in the
 * SEQUENCE arguments/results for an NFSv4.1 compound.
 *
 * Returns 0 on success with a slot assigned in @res->sr_slot, or
 * -EAGAIN after putting @task to sleep on the slot table wait queue
 * (session draining, FIFO ordering, or no free slot).
 */
int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				int cache_reply,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u8 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		return 0;

	tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* Non-privileged tasks must not grab slots while the session drains. */
	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/*
		 * The state manager will wait until the slot table is empty.
		 * Schedule the reset thread
		 */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s Schedule Session Reset\n", __func__);
		return -EAGAIN;
	}

	/* Queue behind earlier waiters to preserve FIFO wakeup order. */
	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s enforce FIFO order\n", __func__);
		return -EAGAIN;
	}

	slotid = nfs4_find_slot(tbl);
	if (slotid == NFS4_MAX_SLOT_TABLE) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("<-- %s: no free slots\n", __func__);
		return -EAGAIN;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
	slot = tbl->slots + slotid;
	args->sa_session = session;
	args->sa_slotid = slotid;
	args->sa_cache_this = cache_reply;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);

	res->sr_session = session;
	res->sr_slot = slot;
	res->sr_renewal_time = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	return 0;
}
|
2011-03-01 09:34:19 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
|
2009-04-01 21:22:13 +08:00
|
|
|
|
2010-06-16 21:52:26 +08:00
|
|
|
/*
 * nfs4_setup_sequence - minor-version-agnostic SEQUENCE setup.
 *
 * If the server has no NFSv4.1 session, clear the session pointers and
 * return 0 (nothing to do); otherwise defer to nfs41_setup_sequence().
 */
int nfs4_setup_sequence(const struct nfs_server *server,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				int cache_reply,
				struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (session == NULL) {
		/* No session (e.g. NFSv4.0): SEQUENCE is not used. */
		args->sa_session = NULL;
		res->sr_session = NULL;
		goto out;
	}

	dprintk("--> %s clp %p session %p sr_slot %td\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot - session->fc_slot_table.slots : -1);

	ret = nfs41_setup_sequence(session, args, res, cache_reply,
				   task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}
|
|
|
|
|
|
|
|
/* Callback data for a synchronous NFSv4.1 RPC carrying a SEQUENCE op. */
struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;	/* server the call targets */
	struct nfs4_sequence_args *seq_args;	/* SEQUENCE arguments */
	struct nfs4_sequence_res *seq_res;	/* SEQUENCE results */
	int cache_reply;			/* ask server to cache the reply */
};
|
|
|
|
|
|
|
|
/*
 * rpc_call_prepare callback: acquire a session slot before the RPC is
 * transmitted.  If nfs4_setup_sequence() returns non-zero the task has
 * been put to sleep and will be retried; otherwise start the call.
 */
static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
				data->seq_res, data->cache_reply, task))
		return;
	rpc_call_start(task);
}
|
|
|
|
|
2009-12-15 13:27:57 +08:00
|
|
|
/*
 * Privileged variant of nfs41_call_sync_prepare: mark the task
 * RPC_PRIORITY_PRIVILEGED so it may take a slot even while the session
 * is draining (see the checks in nfs41_setup_sequence).
 */
static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs41_call_sync_prepare(task, calldata);
}
|
|
|
|
|
2009-04-01 21:22:19 +08:00
|
|
|
/*
 * rpc_call_done callback: process the SEQUENCE result and release the
 * session slot via nfs41_sequence_done().
 */
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}
|
|
|
|
|
2009-04-01 21:22:13 +08:00
|
|
|
/* Callback table for ordinary (non-privileged) synchronous v4.1 calls. */
struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};
|
|
|
|
|
2009-12-15 13:27:57 +08:00
|
|
|
/* Callback table for privileged synchronous v4.1 calls (state recovery). */
struct rpc_call_ops nfs41_call_priv_sync_ops = {
	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};
|
|
|
|
|
2011-03-25 01:12:24 +08:00
|
|
|
/*
 * nfs4_call_sync_sequence - run a synchronous RPC with session slot
 * management wrapped around it.
 *
 * @privileged selects the privileged callback table so the call can
 * proceed while the session is draining.  Returns the task's tk_status,
 * or PTR_ERR() if the task could not be started.
 */
static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res,
				   int cache_reply,
				   int privileged)
{
	int ret;
	struct rpc_task *task;
	struct nfs41_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
		.cache_reply = cache_reply,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &nfs41_call_sync_ops,
		.callback_data = &data
	};

	/* No slot yet; nfs41_setup_sequence() will allocate one. */
	res->sr_slot = NULL;
	if (privileged)
		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}
|
|
|
|
|
2011-03-25 01:12:24 +08:00
|
|
|
/*
 * NFSv4.1 implementation of the ->call_sync minor-version op:
 * a non-privileged nfs4_call_sync_sequence().
 */
int _nfs4_call_sync_session(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res,
			    int cache_reply)
{
	return nfs4_call_sync_sequence(clnt, server, msg, args, res, cache_reply, 0);
}
|
|
|
|
|
2010-06-16 21:52:26 +08:00
|
|
|
#else
|
2010-08-01 02:29:06 +08:00
|
|
|
/*
 * !CONFIG_NFS_V4_1 stub: there is no SEQUENCE op, so report
 * "sequence processing complete" unconditionally.
 */
static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	return 1;
}
|
2009-04-01 21:22:03 +08:00
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
2011-03-25 01:12:24 +08:00
|
|
|
/*
 * NFSv4.0 implementation of the ->call_sync minor-version op: no
 * session exists, so clear the session pointers and issue a plain
 * synchronous RPC.
 */
int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res,
		    int cache_reply)
{
	args->sa_session = res->sr_session = NULL;
	return rpc_call_sync(clnt, msg, 0);
}
|
|
|
|
|
2011-03-25 01:12:23 +08:00
|
|
|
/*
 * Dispatch a synchronous call through the minor-version-specific
 * ->call_sync op (_nfs4_call_sync for v4.0, _nfs4_call_sync_session
 * for v4.1).
 */
static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						       args, res, cache_reply);
}
|
2009-04-01 21:22:03 +08:00
|
|
|
|
2006-05-25 13:40:57 +08:00
|
|
|
/*
 * update_changeattr - apply a directory-changing operation's change_info
 * to the directory inode's cached attributes.
 *
 * Marks attributes and data invalid; if the change was not atomic or
 * another change slipped in (cinfo->before differs from our cached
 * change attribute), force lookup revalidation of cached dentries.
 */
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != nfsi->change_attr)
		nfs_force_lookup_revalidate(dir);
	nfsi->change_attr = cinfo->after;
	spin_unlock(&dir->i_lock);
}
|
|
|
|
|
2006-01-03 16:55:08 +08:00
|
|
|
/*
 * State for an in-flight NFSv4 OPEN (and, for v4.0, OPEN_CONFIRM),
 * reference counted via kref so async completion can outlive the caller.
 */
struct nfs4_opendata {
	struct kref kref;			/* released via nfs4_opendata_free() */
	struct nfs_openargs o_arg;		/* OPEN arguments */
	struct nfs_openres o_res;		/* OPEN results */
	struct nfs_open_confirmargs c_arg;	/* OPEN_CONFIRM arguments */
	struct nfs_open_confirmres c_res;	/* OPEN_CONFIRM results */
	struct nfs_fattr f_attr;		/* attributes of the opened file */
	struct nfs_fattr dir_attr;		/* post-op attributes of the parent dir */
	struct path path;			/* dentry/vfsmount being opened */
	struct dentry *dir;			/* parent directory dentry */
	struct nfs4_state_owner *owner;		/* open-owner (referenced) */
	struct nfs4_state *state;		/* state being recovered, if any */
	struct iattr attrs;			/* create attributes (O_CREAT) */
	unsigned long timestamp;		/* jiffies — presumably when the call was sent; set outside this chunk */
	unsigned int rpc_done : 1;		/* OPEN RPC completed successfully */
	int rpc_status;				/* OPEN RPC status */
	int cancelled;				/* caller gave up waiting for the task */
};
|
|
|
|
|
2007-07-04 11:48:13 +08:00
|
|
|
|
|
|
|
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
|
|
|
|
{
|
|
|
|
p->o_res.f_attr = &p->f_attr;
|
|
|
|
p->o_res.dir_attr = &p->dir_attr;
|
2008-04-08 01:20:54 +08:00
|
|
|
p->o_res.seqid = p->o_arg.seqid;
|
|
|
|
p->c_res.seqid = p->c_arg.seqid;
|
2007-07-04 11:48:13 +08:00
|
|
|
p->o_res.server = p->o_arg.server;
|
|
|
|
nfs_fattr_init(&p->f_attr);
|
|
|
|
nfs_fattr_init(&p->dir_attr);
|
|
|
|
}
|
|
|
|
|
2007-06-06 00:30:00 +08:00
|
|
|
/*
 * nfs4_opendata_alloc - allocate and initialize an nfs4_opendata for an
 * OPEN of @path under state owner @sp.
 *
 * Takes references on the path, the parent dentry and the state owner;
 * all are dropped by nfs4_opendata_free().  Returns NULL on allocation
 * failure.
 */
static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(path->dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
		goto err_free;
	path_get(path);
	p->path = *path;
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.fh = NFS_FH(dir);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id = sp->so_owner_id.id;
	p->o_arg.name = &p->path.dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
	if (flags & O_CREAT) {
		u32 *s;

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));
		/* Build the create verifier from jiffies and the pid. */
		s = (u32 *) p->o_arg.u.verifier.data;
		s[0] = jiffies;
		s[1] = current->pid;
	}
	/* OPEN_CONFIRM operates on the filehandle/stateid OPEN returns. */
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;
err_free:
	kfree(p);
err:
	dput(parent);
	return NULL;
}
|
|
|
|
|
2007-06-18 04:02:44 +08:00
|
|
|
/*
 * kref release function for struct nfs4_opendata: drop every reference
 * taken by nfs4_opendata_alloc() (and the open state, if recovery
 * attached one), then free the structure.
 */
static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	dput(p->dir);
	path_put(&p->path);
	kfree(p);
}
|
|
|
|
|
|
|
|
static void nfs4_opendata_put(struct nfs4_opendata *p)
|
|
|
|
{
|
|
|
|
if (p != NULL)
|
|
|
|
kref_put(&p->kref, nfs4_opendata_free);
|
2006-01-03 16:55:08 +08:00
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:07 +08:00
|
|
|
/*
 * Wait (killably) for @task to complete.  Returns 0, or -ERESTARTSYS
 * if interrupted (per rpc_wait_for_completion_task's contract).
 */
static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	/* The intermediate local added nothing; return directly. */
	return rpc_wait_for_completion_task(task);
}
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/*
 * can_open_cached - may this open be satisfied from cached open state?
 *
 * Returns non-zero when @state already holds an open of the requested
 * @mode (and at least one active opener of that mode exists).  O_EXCL
 * opens must always go to the server.
 */
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & O_EXCL)
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		case FMODE_READ:
			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
				&& state->n_rdonly != 0;
			break;
		case FMODE_WRITE:
			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
				&& state->n_wronly != 0;
			break;
		case FMODE_READ|FMODE_WRITE:
			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
				&& state->n_rdwr != 0;
	}
out:
	return ret;
}
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/*
 * can_open_delegated - may this open be satisfied by @delegation?
 *
 * The delegation must cover the requested @fmode and must not be
 * pending reclaim.  Marks the delegation referenced on success so the
 * delegation reaper keeps it.
 */
static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
		return 0;
	nfs_mark_delegation_referenced(delegation);
	return 1;
}
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/*
 * Bump the per-mode open counter for @fmode and fold the mode into the
 * state's share mode.  Caller holds the owner's so_lock (the _locked
 * helper it calls expects serialization).
 */
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_WRITE:
			state->n_wronly++;
			break;
		case FMODE_READ:
			state->n_rdonly++;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/*
 * Record a new open stateid.  The "current" stateid is only updated
 * when no delegation is in effect (delegation stateid takes priority).
 * Caller holds state->seqlock for writing.
 */
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
	memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/* Locked wrapper around nfs_set_open_stateid_locked(). */
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
}
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/*
 * Install new open and/or delegation stateids on @state, then account
 * the open under the owner's so_lock.  Either stateid may be NULL.
 */
static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data));
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/*
 * update_open_stateid - apply an open result to @state, preferring the
 * inode's current delegation when one matches.
 *
 * Under RCU + the delegation's own lock, verify the delegation still
 * belongs to this inode, covers @fmode, and (if @delegation was passed)
 * matches that stateid; if so install it alongside @open_stateid.
 * Otherwise fall back to installing just @open_stateid.  Returns 1 if
 * any stateid was installed, 0 if nothing was updated.
 */
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	/* Re-check under the lock: the delegation may have been replaced. */
	if (nfsi->delegation != deleg_cur ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}

	return ret;
}
|
|
|
|
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/*
 * If the inode holds a delegation that does not cover @fmode, return it
 * to the server (the subsequent OPEN must go on the wire anyway).
 */
static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		/* No delegation, or it already covers the mode: keep it. */
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs_inode_return_delegation(inode);
}
|
|
|
|
|
2007-07-09 02:11:36 +08:00
|
|
|
/*
 * nfs4_try_open_cached - attempt to satisfy an open without an OPEN RPC.
 *
 * Loops trying (a) a pure cached open (share mode already held), then
 * (b) an open by delegation after a permission check.  Returns the
 * state with an extra reference on success, ERR_PTR(-EAGAIN) when a
 * real OPEN is required, or another ERR_PTR on permission failure.
 */
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags & O_EXCL;
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		/* Double-checked: recheck under so_lock before committing. */
		if (can_open_cached(state, fmode, open_mode)) {
			spin_lock(&state->owner->so_lock);
			if (can_open_cached(state, fmode, open_mode)) {
				update_open_stateflags(state, fmode);
				spin_unlock(&state->owner->so_lock);
				goto out_return_state;
			}
			spin_unlock(&state->owner->so_lock);
		}
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (delegation == NULL ||
		    !can_open_delegated(delegation, fmode)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
		rcu_read_unlock();
		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
		if (ret != 0)
			goto out;
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}
|
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
/*
 * nfs4_opendata_to_nfs4_state - turn a completed (or cached) OPEN into
 * a referenced nfs4_state.
 *
 * If the RPC never completed, retry via the cached-open path.  On a
 * completed OPEN, instantiate the inode from the returned filehandle
 * and attributes, record any delegation granted, and install the open
 * stateid.  Returns the state or an ERR_PTR.
 */
static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	struct nfs_delegation *delegation;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0) {
		int delegation_flags = 0;

		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(inode)->delegation);
		if (delegation)
			delegation_flags = delegation->flags;
		rcu_read_unlock();
		/* New grant vs. reclaim of a delegation we already held. */
		if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
			nfs_inode_set_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
		else
			nfs_inode_reclaim_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
	}

	update_open_stateid(state, &data->o_res.stateid, NULL,
			data->o_arg.fmode);
	iput(inode);
out:
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
|
|
|
|
|
2006-01-03 16:55:15 +08:00
|
|
|
/*
 * Find an open context on the inode that refers to @state.  Returns a
 * referenced context, or ERR_PTR(-ENOENT) if none matches.
 */
static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}
|
|
|
|
|
2007-07-18 09:50:45 +08:00
|
|
|
/*
 * Allocate an nfs4_opendata for recovering @state (flags/mode are set
 * later by the specific recovery path).  Takes a reference on @state.
 */
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
/*
 * Re-open @fmode during recovery and verify the server handed back the
 * state we expected.  The extra open reference from the recovery OPEN
 * is dropped immediately via nfs4_close_state(); *res carries the
 * resulting state back to the caller for comparison.
 */
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
	struct nfs4_state *newstate;
	int ret;

	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(&opendata->path, newstate, fmode);
	*res = newstate;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * nfs4_open_recover - re-establish every share mode held on @state.
 *
 * Replays an OPEN for each mode with a non-zero opener count (RDWR,
 * then WRONLY, then RDONLY), failing with -ESTALE if the server maps
 * any of them to a different state.  Finally syncs state->stateid with
 * open_stateid if all recoveries were satisfied from cache.
 */
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
		write_sequnlock(&state->seqlock);
	}
	return 0;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* OPEN_RECLAIM:
|
|
|
|
* reclaim state on the server after a reboot.
|
|
|
|
*/
|
2007-06-05 23:46:42 +08:00
|
|
|
/*
 * One attempt at CLAIM_PREVIOUS open reclaim after a server reboot:
 * reclaim the file's opens and, if a delegation is flagged for reclaim,
 * ask for that delegation type back as well.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
	opendata->o_arg.fh = NFS_FH(state->inode);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}
|
|
|
|
|
2007-06-05 23:46:42 +08:00
|
|
|
/*
 * Retry wrapper for _nfs4_do_open_reclaim: only NFS4ERR_DELAY is
 * retried (via the exception handler's backoff); any other result is
 * returned to the caller.
 */
static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
2006-01-03 16:55:15 +08:00
|
|
|
static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
|
|
|
|
{
|
|
|
|
struct nfs_open_context *ctx;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ctx = nfs4_state_find_open_context(state);
|
|
|
|
if (IS_ERR(ctx))
|
|
|
|
return PTR_ERR(ctx);
|
2007-06-05 23:46:42 +08:00
|
|
|
ret = nfs4_do_open_reclaim(ctx, state);
|
2006-01-03 16:55:15 +08:00
|
|
|
put_nfs_open_context(ctx);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-07-07 03:10:43 +08:00
|
|
|
static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-01-03 16:55:08 +08:00
|
|
|
struct nfs4_opendata *opendata;
|
2006-01-03 16:55:15 +08:00
|
|
|
int ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-18 09:50:45 +08:00
|
|
|
opendata = nfs4_open_recoverdata_alloc(ctx, state);
|
|
|
|
if (IS_ERR(opendata))
|
|
|
|
return PTR_ERR(opendata);
|
2006-01-03 16:55:08 +08:00
|
|
|
opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
|
2007-07-07 03:10:43 +08:00
|
|
|
memcpy(opendata->o_arg.u.delegation.data, stateid->data,
|
2006-01-03 16:55:08 +08:00
|
|
|
sizeof(opendata->o_arg.u.delegation.data));
|
2006-01-03 16:55:15 +08:00
|
|
|
ret = nfs4_open_recover(opendata, state);
|
2007-06-18 04:02:44 +08:00
|
|
|
nfs4_opendata_put(opendata);
|
2006-01-03 16:55:15 +08:00
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-07-07 03:10:43 +08:00
|
|
|
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
2007-06-05 23:46:42 +08:00
|
|
|
struct nfs_server *server = NFS_SERVER(state->inode);
|
2005-04-17 06:20:36 +08:00
|
|
|
int err;
|
|
|
|
do {
|
2007-07-07 03:10:43 +08:00
|
|
|
err = _nfs4_open_delegation_recall(ctx, state, stateid);
|
2005-04-17 06:20:36 +08:00
|
|
|
switch (err) {
|
|
|
|
case 0:
|
2009-06-18 04:22:59 +08:00
|
|
|
case -ENOENT:
|
|
|
|
case -ESTALE:
|
|
|
|
goto out;
|
2009-12-07 22:22:29 +08:00
|
|
|
case -NFS4ERR_BADSESSION:
|
|
|
|
case -NFS4ERR_BADSLOT:
|
|
|
|
case -NFS4ERR_BAD_HIGH_SLOT:
|
|
|
|
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
|
|
|
case -NFS4ERR_DEADSESSION:
|
2011-03-10 05:00:53 +08:00
|
|
|
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
|
2009-12-07 22:22:29 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
case -NFS4ERR_STALE_CLIENTID:
|
|
|
|
case -NFS4ERR_STALE_STATEID:
|
|
|
|
case -NFS4ERR_EXPIRED:
|
|
|
|
/* Don't recall a delegation if it was lost */
|
2011-03-10 05:00:53 +08:00
|
|
|
nfs4_schedule_lease_recovery(server->nfs_client);
|
2009-06-18 04:22:59 +08:00
|
|
|
goto out;
|
|
|
|
case -ERESTARTSYS:
|
|
|
|
/*
|
|
|
|
* The show must go on: exit, but mark the
|
|
|
|
* stateid as needing recovery.
|
|
|
|
*/
|
|
|
|
case -NFS4ERR_ADMIN_REVOKED:
|
|
|
|
case -NFS4ERR_BAD_STATEID:
|
2011-03-10 05:00:53 +08:00
|
|
|
nfs4_schedule_stateid_recovery(server, state);
|
2010-10-20 07:47:49 +08:00
|
|
|
case -EKEYEXPIRED:
|
|
|
|
/*
|
|
|
|
* User RPCSEC_GSS context has expired.
|
|
|
|
* We cannot recover this stateid now, so
|
|
|
|
* skip it and allow recovery thread to
|
|
|
|
* proceed.
|
|
|
|
*/
|
2009-06-18 04:22:59 +08:00
|
|
|
case -ENOMEM:
|
|
|
|
err = 0;
|
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
err = nfs4_handle_exception(server, err, &exception);
|
|
|
|
} while (exception.retry);
|
2009-06-18 04:22:59 +08:00
|
|
|
out:
|
2005-04-17 06:20:36 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:12 +08:00
|
|
|
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_opendata *data = calldata;
|
|
|
|
|
|
|
|
data->rpc_status = task->tk_status;
|
2006-01-03 16:55:21 +08:00
|
|
|
if (data->rpc_status == 0) {
|
2006-01-03 16:55:12 +08:00
|
|
|
memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
|
|
|
|
sizeof(data->o_res.stateid.data));
|
2008-01-03 04:19:18 +08:00
|
|
|
nfs_confirm_seqid(&data->owner->so_seqid, 0);
|
2006-01-03 16:55:21 +08:00
|
|
|
renew_lease(data->o_res.server, data->timestamp);
|
2007-07-08 01:19:59 +08:00
|
|
|
data->rpc_done = 1;
|
2006-01-03 16:55:21 +08:00
|
|
|
}
|
2006-01-03 16:55:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs4_open_confirm_release(void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_opendata *data = calldata;
|
|
|
|
struct nfs4_state *state = NULL;
|
|
|
|
|
|
|
|
/* If this request hasn't been cancelled, do nothing */
|
|
|
|
if (data->cancelled == 0)
|
|
|
|
goto out_free;
|
|
|
|
/* In case of error, no cleanup! */
|
2007-07-08 01:19:59 +08:00
|
|
|
if (!data->rpc_done)
|
2006-01-03 16:55:12 +08:00
|
|
|
goto out_free;
|
|
|
|
state = nfs4_opendata_to_nfs4_state(data);
|
2007-07-07 20:04:47 +08:00
|
|
|
if (!IS_ERR(state))
|
2008-12-24 04:21:56 +08:00
|
|
|
nfs4_close_state(&data->path, state, data->o_arg.fmode);
|
2006-01-03 16:55:12 +08:00
|
|
|
out_free:
|
2007-06-18 04:02:44 +08:00
|
|
|
nfs4_opendata_put(data);
|
2006-01-03 16:55:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct rpc_call_ops nfs4_open_confirm_ops = {
|
|
|
|
.rpc_call_done = nfs4_open_confirm_done,
|
|
|
|
.rpc_release = nfs4_open_confirm_release,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
|
|
|
|
*/
|
|
|
|
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
|
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
|
|
|
|
struct rpc_task *task;
|
2007-07-15 03:40:01 +08:00
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
|
|
|
|
.rpc_argp = &data->c_arg,
|
|
|
|
.rpc_resp = &data->c_res,
|
|
|
|
.rpc_cred = data->owner->so_cred,
|
|
|
|
};
|
2007-07-15 03:39:59 +08:00
|
|
|
struct rpc_task_setup task_setup_data = {
|
|
|
|
.rpc_client = server->client,
|
2007-07-15 03:40:01 +08:00
|
|
|
.rpc_message = &msg,
|
2007-07-15 03:39:59 +08:00
|
|
|
.callback_ops = &nfs4_open_confirm_ops,
|
|
|
|
.callback_data = data,
|
2008-02-20 09:04:23 +08:00
|
|
|
.workqueue = nfsiod_workqueue,
|
2007-07-15 03:39:59 +08:00
|
|
|
.flags = RPC_TASK_ASYNC,
|
|
|
|
};
|
2005-04-17 06:20:36 +08:00
|
|
|
int status;
|
|
|
|
|
2007-06-18 04:02:44 +08:00
|
|
|
kref_get(&data->kref);
|
2007-07-08 01:19:59 +08:00
|
|
|
data->rpc_done = 0;
|
|
|
|
data->rpc_status = 0;
|
2007-07-15 03:40:01 +08:00
|
|
|
data->timestamp = jiffies;
|
2007-07-15 03:39:59 +08:00
|
|
|
task = rpc_run_task(&task_setup_data);
|
2006-03-21 07:11:10 +08:00
|
|
|
if (IS_ERR(task))
|
2006-01-03 16:55:12 +08:00
|
|
|
return PTR_ERR(task);
|
|
|
|
status = nfs4_wait_for_completion_rpc_task(task);
|
|
|
|
if (status != 0) {
|
|
|
|
data->cancelled = 1;
|
|
|
|
smp_wmb();
|
|
|
|
} else
|
|
|
|
status = data->rpc_status;
|
2006-11-12 11:18:03 +08:00
|
|
|
rpc_put_task(task);
|
2005-04-17 06:20:36 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-01-03 16:55:11 +08:00
|
|
|
struct nfs4_opendata *data = calldata;
|
|
|
|
struct nfs4_state_owner *sp = data->owner;
|
2007-07-15 03:40:01 +08:00
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
|
|
|
|
return;
|
2007-07-06 07:02:21 +08:00
|
|
|
/*
|
|
|
|
* Check if we still need to send an OPEN call, or if we can use
|
|
|
|
* a delegation instead.
|
|
|
|
*/
|
|
|
|
if (data->state != NULL) {
|
|
|
|
struct nfs_delegation *delegation;
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
|
2007-07-09 02:11:36 +08:00
|
|
|
goto out_no_action;
|
2007-07-06 07:02:21 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
|
|
|
|
if (delegation != NULL &&
|
2008-12-24 04:21:39 +08:00
|
|
|
test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
|
2007-07-06 07:02:21 +08:00
|
|
|
rcu_read_unlock();
|
2007-07-09 02:11:36 +08:00
|
|
|
goto out_no_action;
|
2007-07-06 07:02:21 +08:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
2006-01-03 16:55:11 +08:00
|
|
|
/* Update sequence id. */
|
2007-07-03 01:58:33 +08:00
|
|
|
data->o_arg.id = sp->so_owner_id.id;
|
2010-06-25 03:11:43 +08:00
|
|
|
data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
|
2007-07-18 09:50:45 +08:00
|
|
|
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
|
2007-07-15 03:40:01 +08:00
|
|
|
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
|
2007-07-18 09:50:45 +08:00
|
|
|
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
|
|
|
|
}
|
2006-01-03 16:55:21 +08:00
|
|
|
data->timestamp = jiffies;
|
2010-06-16 21:52:26 +08:00
|
|
|
if (nfs4_setup_sequence(data->o_arg.server,
|
2009-04-01 21:22:21 +08:00
|
|
|
&data->o_arg.seq_args,
|
|
|
|
&data->o_res.seq_res, 1, task))
|
|
|
|
return;
|
2007-07-15 03:40:01 +08:00
|
|
|
rpc_call_start(task);
|
2007-07-09 02:11:36 +08:00
|
|
|
return;
|
|
|
|
out_no_action:
|
|
|
|
task->tk_action = NULL;
|
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-12-15 13:27:57 +08:00
|
|
|
static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
|
|
|
|
nfs4_open_prepare(task, calldata);
|
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
static void nfs4_open_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_opendata *data = calldata;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
data->rpc_status = task->tk_status;
|
2009-04-01 21:22:21 +08:00
|
|
|
|
2010-08-01 02:29:06 +08:00
|
|
|
if (!nfs4_sequence_done(task, &data->o_res.seq_res))
|
|
|
|
return;
|
2009-04-01 21:22:21 +08:00
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
if (task->tk_status == 0) {
|
|
|
|
switch (data->o_res.f_attr->mode & S_IFMT) {
|
2005-10-19 05:20:18 +08:00
|
|
|
case S_IFREG:
|
|
|
|
break;
|
|
|
|
case S_IFLNK:
|
2006-01-03 16:55:11 +08:00
|
|
|
data->rpc_status = -ELOOP;
|
2005-10-19 05:20:18 +08:00
|
|
|
break;
|
|
|
|
case S_IFDIR:
|
2006-01-03 16:55:11 +08:00
|
|
|
data->rpc_status = -EISDIR;
|
2005-10-19 05:20:18 +08:00
|
|
|
break;
|
|
|
|
default:
|
2006-01-03 16:55:11 +08:00
|
|
|
data->rpc_status = -ENOTDIR;
|
2005-10-19 05:20:18 +08:00
|
|
|
}
|
2006-01-03 16:55:21 +08:00
|
|
|
renew_lease(data->o_res.server, data->timestamp);
|
2007-07-09 04:19:56 +08:00
|
|
|
if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
|
|
|
|
nfs_confirm_seqid(&data->owner->so_seqid, 0);
|
2005-10-19 05:20:18 +08:00
|
|
|
}
|
2007-07-08 01:19:59 +08:00
|
|
|
data->rpc_done = 1;
|
2006-01-03 16:55:11 +08:00
|
|
|
}
|
2005-10-19 05:20:18 +08:00
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
static void nfs4_open_release(void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_opendata *data = calldata;
|
|
|
|
struct nfs4_state *state = NULL;
|
|
|
|
|
|
|
|
/* If this request hasn't been cancelled, do nothing */
|
|
|
|
if (data->cancelled == 0)
|
|
|
|
goto out_free;
|
|
|
|
/* In case of error, no cleanup! */
|
2007-07-08 01:19:59 +08:00
|
|
|
if (data->rpc_status != 0 || !data->rpc_done)
|
2006-01-03 16:55:11 +08:00
|
|
|
goto out_free;
|
|
|
|
/* In case we need an open_confirm, no cleanup! */
|
|
|
|
if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
|
|
|
|
goto out_free;
|
|
|
|
state = nfs4_opendata_to_nfs4_state(data);
|
2007-07-07 20:04:47 +08:00
|
|
|
if (!IS_ERR(state))
|
2008-12-24 04:21:56 +08:00
|
|
|
nfs4_close_state(&data->path, state, data->o_arg.fmode);
|
2006-01-03 16:55:11 +08:00
|
|
|
out_free:
|
2007-06-18 04:02:44 +08:00
|
|
|
nfs4_opendata_put(data);
|
2006-01-03 16:55:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static const struct rpc_call_ops nfs4_open_ops = {
|
|
|
|
.rpc_call_prepare = nfs4_open_prepare,
|
|
|
|
.rpc_call_done = nfs4_open_done,
|
|
|
|
.rpc_release = nfs4_open_release,
|
|
|
|
};
|
|
|
|
|
2009-12-15 13:27:57 +08:00
|
|
|
static const struct rpc_call_ops nfs4_recover_open_ops = {
|
|
|
|
.rpc_call_prepare = nfs4_recover_open_prepare,
|
|
|
|
.rpc_call_done = nfs4_open_done,
|
|
|
|
.rpc_release = nfs4_open_release,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
|
2006-01-03 16:55:11 +08:00
|
|
|
{
|
|
|
|
struct inode *dir = data->dir->d_inode;
|
|
|
|
struct nfs_server *server = NFS_SERVER(dir);
|
|
|
|
struct nfs_openargs *o_arg = &data->o_arg;
|
|
|
|
struct nfs_openres *o_res = &data->o_res;
|
|
|
|
struct rpc_task *task;
|
2007-07-15 03:40:01 +08:00
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
|
|
|
|
.rpc_argp = o_arg,
|
|
|
|
.rpc_resp = o_res,
|
|
|
|
.rpc_cred = data->owner->so_cred,
|
|
|
|
};
|
2007-07-15 03:39:59 +08:00
|
|
|
struct rpc_task_setup task_setup_data = {
|
|
|
|
.rpc_client = server->client,
|
2007-07-15 03:40:01 +08:00
|
|
|
.rpc_message = &msg,
|
2007-07-15 03:39:59 +08:00
|
|
|
.callback_ops = &nfs4_open_ops,
|
|
|
|
.callback_data = data,
|
2008-02-20 09:04:23 +08:00
|
|
|
.workqueue = nfsiod_workqueue,
|
2007-07-15 03:39:59 +08:00
|
|
|
.flags = RPC_TASK_ASYNC,
|
|
|
|
};
|
2006-01-03 16:55:11 +08:00
|
|
|
int status;
|
|
|
|
|
2007-06-18 04:02:44 +08:00
|
|
|
kref_get(&data->kref);
|
2007-07-08 01:19:59 +08:00
|
|
|
data->rpc_done = 0;
|
|
|
|
data->rpc_status = 0;
|
2007-07-04 11:48:13 +08:00
|
|
|
data->cancelled = 0;
|
2009-12-15 13:27:57 +08:00
|
|
|
if (isrecover)
|
|
|
|
task_setup_data.callback_ops = &nfs4_recover_open_ops;
|
2007-07-15 03:39:59 +08:00
|
|
|
task = rpc_run_task(&task_setup_data);
|
2009-12-15 13:27:57 +08:00
|
|
|
if (IS_ERR(task))
|
|
|
|
return PTR_ERR(task);
|
|
|
|
status = nfs4_wait_for_completion_rpc_task(task);
|
|
|
|
if (status != 0) {
|
|
|
|
data->cancelled = 1;
|
|
|
|
smp_wmb();
|
|
|
|
} else
|
|
|
|
status = data->rpc_status;
|
|
|
|
rpc_put_task(task);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
|
|
|
|
{
|
|
|
|
struct inode *dir = data->dir->d_inode;
|
|
|
|
struct nfs_openres *o_res = &data->o_res;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
status = nfs4_run_open_task(data, 1);
|
|
|
|
if (status != 0 || !data->rpc_done)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
nfs_refresh_inode(dir, o_res->dir_attr);
|
|
|
|
|
|
|
|
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
|
|
|
|
status = _nfs4_proc_open_confirm(data);
|
|
|
|
if (status != 0)
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note: On error, nfs4_proc_open will free the struct nfs4_opendata
|
|
|
|
*/
|
|
|
|
static int _nfs4_proc_open(struct nfs4_opendata *data)
|
|
|
|
{
|
|
|
|
struct inode *dir = data->dir->d_inode;
|
|
|
|
struct nfs_server *server = NFS_SERVER(dir);
|
|
|
|
struct nfs_openargs *o_arg = &data->o_arg;
|
|
|
|
struct nfs_openres *o_res = &data->o_res;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
status = nfs4_run_open_task(data, 0);
|
2007-07-08 01:19:59 +08:00
|
|
|
if (status != 0 || !data->rpc_done)
|
2006-01-03 16:55:11 +08:00
|
|
|
return status;
|
|
|
|
|
2005-10-28 10:12:40 +08:00
|
|
|
if (o_arg->open_flags & O_CREAT) {
|
|
|
|
update_changeattr(dir, &o_res->cinfo);
|
|
|
|
nfs_post_op_update_inode(dir, o_res->dir_attr);
|
|
|
|
} else
|
|
|
|
nfs_refresh_inode(dir, o_res->dir_attr);
|
2010-04-12 04:48:44 +08:00
|
|
|
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
|
|
|
|
server->caps &= ~NFS_CAP_POSIX_LOCK;
|
2005-04-17 06:20:36 +08:00
|
|
|
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
|
2006-01-03 16:55:12 +08:00
|
|
|
status = _nfs4_proc_open_confirm(data);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (status != 0)
|
2006-01-03 16:55:11 +08:00
|
|
|
return status;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
|
2007-07-18 09:52:41 +08:00
|
|
|
_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
|
2006-01-03 16:55:11 +08:00
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2011-03-01 09:34:17 +08:00
|
|
|
static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
|
2006-01-03 16:55:24 +08:00
|
|
|
{
|
2009-08-10 03:06:19 +08:00
|
|
|
unsigned int loop;
|
2006-09-15 02:03:14 +08:00
|
|
|
int ret;
|
2006-01-03 16:55:24 +08:00
|
|
|
|
2009-08-10 03:06:19 +08:00
|
|
|
for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
|
2008-12-24 04:21:44 +08:00
|
|
|
ret = nfs4_wait_clnt_recover(clp);
|
2006-09-15 02:03:14 +08:00
|
|
|
if (ret != 0)
|
2009-08-10 03:06:19 +08:00
|
|
|
break;
|
2008-12-24 04:21:42 +08:00
|
|
|
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
|
|
|
|
!test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
|
2006-09-15 02:03:14 +08:00
|
|
|
break;
|
2011-03-10 05:00:53 +08:00
|
|
|
nfs4_schedule_state_manager(clp);
|
2009-08-10 03:06:19 +08:00
|
|
|
ret = -EIO;
|
2006-09-15 02:03:14 +08:00
|
|
|
}
|
2009-08-10 03:06:19 +08:00
|
|
|
return ret;
|
2006-01-03 16:55:24 +08:00
|
|
|
}
|
|
|
|
|
2011-03-01 09:34:17 +08:00
|
|
|
static int nfs4_recover_expired_lease(struct nfs_server *server)
|
|
|
|
{
|
|
|
|
return nfs4_client_recover_expired_lease(server->nfs_client);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* OPEN_EXPIRED:
|
|
|
|
* reclaim state on the server after a network partition.
|
|
|
|
* Assumes caller holds the appropriate lock
|
|
|
|
*/
|
2007-06-05 23:46:42 +08:00
|
|
|
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-01-03 16:55:08 +08:00
|
|
|
struct nfs4_opendata *opendata;
|
2006-01-03 16:55:15 +08:00
|
|
|
int ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-18 09:50:45 +08:00
|
|
|
opendata = nfs4_open_recoverdata_alloc(ctx, state);
|
|
|
|
if (IS_ERR(opendata))
|
|
|
|
return PTR_ERR(opendata);
|
2006-01-03 16:55:15 +08:00
|
|
|
ret = nfs4_open_recover(opendata, state);
|
2008-04-06 03:54:17 +08:00
|
|
|
if (ret == -ESTALE)
|
2007-06-05 23:46:42 +08:00
|
|
|
d_drop(ctx->path.dentry);
|
2007-06-18 04:02:44 +08:00
|
|
|
nfs4_opendata_put(opendata);
|
2006-01-03 16:55:15 +08:00
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2009-12-04 04:53:21 +08:00
|
|
|
static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
|
2005-06-23 01:16:29 +08:00
|
|
|
{
|
2007-06-05 23:46:42 +08:00
|
|
|
struct nfs_server *server = NFS_SERVER(state->inode);
|
2005-06-23 01:16:29 +08:00
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
2007-06-05 23:46:42 +08:00
|
|
|
err = _nfs4_open_expired(ctx, state);
|
2009-12-04 04:53:21 +08:00
|
|
|
switch (err) {
|
|
|
|
default:
|
|
|
|
goto out;
|
|
|
|
case -NFS4ERR_GRACE:
|
|
|
|
case -NFS4ERR_DELAY:
|
|
|
|
nfs4_handle_exception(server, err, &exception);
|
|
|
|
err = 0;
|
|
|
|
}
|
2005-06-23 01:16:29 +08:00
|
|
|
} while (exception.retry);
|
2009-12-04 04:53:21 +08:00
|
|
|
out:
|
2005-06-23 01:16:29 +08:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
|
|
|
|
{
|
|
|
|
struct nfs_open_context *ctx;
|
2006-01-03 16:55:15 +08:00
|
|
|
int ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-01-03 16:55:15 +08:00
|
|
|
ctx = nfs4_state_find_open_context(state);
|
|
|
|
if (IS_ERR(ctx))
|
|
|
|
return PTR_ERR(ctx);
|
2007-06-05 23:46:42 +08:00
|
|
|
ret = nfs4_do_open_expired(ctx, state);
|
2006-01-03 16:55:15 +08:00
|
|
|
put_nfs_open_context(ctx);
|
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2007-06-06 02:49:03 +08:00
|
|
|
/*
|
|
|
|
* on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
|
|
|
|
* fields corresponding to attributes that were used to store the verifier.
|
|
|
|
* Make sure we clobber those fields in the later setattr call
|
|
|
|
*/
|
|
|
|
static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
|
|
|
|
{
|
|
|
|
if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
|
|
|
|
!(sattr->ia_valid & ATTR_ATIME_SET))
|
|
|
|
sattr->ia_valid |= ATTR_ATIME;
|
|
|
|
|
|
|
|
if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
|
|
|
|
!(sattr->ia_valid & ATTR_MTIME_SET))
|
|
|
|
sattr->ia_valid |= ATTR_MTIME;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2006-01-03 16:55:11 +08:00
|
|
|
* Returns a referenced nfs4_state
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2008-12-24 04:21:56 +08:00
|
|
|
static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct nfs4_state_owner *sp;
|
|
|
|
struct nfs4_state *state = NULL;
|
|
|
|
struct nfs_server *server = NFS_SERVER(dir);
|
2006-01-03 16:55:08 +08:00
|
|
|
struct nfs4_opendata *opendata;
|
2007-07-06 07:02:21 +08:00
|
|
|
int status;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Protect against reboot recovery conflicts */
|
|
|
|
status = -ENOMEM;
|
|
|
|
if (!(sp = nfs4_get_state_owner(server, cred))) {
|
|
|
|
dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
|
|
|
|
goto out_err;
|
|
|
|
}
|
2006-01-03 16:55:24 +08:00
|
|
|
status = nfs4_recover_expired_lease(server);
|
|
|
|
if (status != 0)
|
2006-01-03 16:55:25 +08:00
|
|
|
goto err_put_state_owner;
|
2007-07-06 07:02:21 +08:00
|
|
|
if (path->dentry->d_inode != NULL)
|
2008-12-24 04:21:56 +08:00
|
|
|
nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode);
|
2006-01-03 16:55:24 +08:00
|
|
|
status = -ENOMEM;
|
2010-05-14 00:51:01 +08:00
|
|
|
opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr, GFP_KERNEL);
|
2006-01-03 16:55:08 +08:00
|
|
|
if (opendata == NULL)
|
2008-12-24 04:21:45 +08:00
|
|
|
goto err_put_state_owner;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-07-06 07:02:21 +08:00
|
|
|
if (path->dentry->d_inode != NULL)
|
|
|
|
opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp);
|
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
status = _nfs4_proc_open(opendata);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (status != 0)
|
2007-06-18 04:02:44 +08:00
|
|
|
goto err_opendata_put;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-01-03 16:55:11 +08:00
|
|
|
state = nfs4_opendata_to_nfs4_state(opendata);
|
2007-07-07 20:04:47 +08:00
|
|
|
status = PTR_ERR(state);
|
|
|
|
if (IS_ERR(state))
|
2007-06-18 04:02:44 +08:00
|
|
|
goto err_opendata_put;
|
2010-04-12 04:48:44 +08:00
|
|
|
if (server->caps & NFS_CAP_POSIX_LOCK)
|
2010-01-27 04:42:30 +08:00
|
|
|
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
|
2010-04-17 04:22:51 +08:00
|
|
|
|
|
|
|
if (opendata->o_arg.open_flags & O_EXCL) {
|
|
|
|
nfs4_exclusive_attrset(opendata, sattr);
|
|
|
|
|
|
|
|
nfs_fattr_init(opendata->o_res.f_attr);
|
|
|
|
status = nfs4_do_setattr(state->inode, cred,
|
|
|
|
opendata->o_res.f_attr, sattr,
|
|
|
|
state);
|
|
|
|
if (status == 0)
|
|
|
|
nfs_setattr_update_inode(state->inode, sattr);
|
|
|
|
nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
|
|
|
|
}
|
2007-06-18 04:02:44 +08:00
|
|
|
nfs4_opendata_put(opendata);
|
2005-04-17 06:20:36 +08:00
|
|
|
nfs4_put_state_owner(sp);
|
|
|
|
*res = state;
|
|
|
|
return 0;
|
2007-06-18 04:02:44 +08:00
|
|
|
err_opendata_put:
|
|
|
|
nfs4_opendata_put(opendata);
|
2006-01-03 16:55:08 +08:00
|
|
|
err_put_state_owner:
|
|
|
|
nfs4_put_state_owner(sp);
|
2005-04-17 06:20:36 +08:00
|
|
|
out_err:
|
|
|
|
*res = NULL;
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-12-24 04:21:56 +08:00
|
|
|
static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
struct nfs4_state *res;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
do {
|
2008-12-24 04:21:56 +08:00
|
|
|
status = _nfs4_do_open(dir, path, fmode, flags, sattr, cred, &res);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (status == 0)
|
|
|
|
break;
|
|
|
|
/* NOTE: BAD_SEQID means the server and client disagree about the
|
|
|
|
* book-keeping w.r.t. state-changing operations
|
|
|
|
* (OPEN/CLOSE/LOCK/LOCKU...)
|
|
|
|
* It is actually a sign of a bug on the client or on the server.
|
|
|
|
*
|
|
|
|
* If we receive a BAD_SEQID error in the particular case of
|
NFSv4: Add functions to order RPC calls
NFSv4 file state-changing functions such as OPEN, CLOSE, LOCK,... are all
labelled with "sequence identifiers" in order to prevent the server from
reordering RPC requests, as this could cause its file state to
become out of sync with the client.
Currently the NFS client code enforces this ordering locally using
semaphores to restrict access to structures until the RPC call is done.
This, of course, only works with synchronous RPC calls, since the
user process must first grab the semaphore.
By dropping semaphores, and instead teaching the RPC engine to hold
the RPC calls until they are ready to be sent, we can extend this
process to work nicely with asynchronous RPC calls too.
This patch adds a new list called "rpc_sequence" that defines the order
of the RPC calls to be sent. We add one such list for each state_owner.
When an RPC call is ready to be sent, it checks if it is top of the
rpc_sequence list. If so, it proceeds. If not, it goes back to sleep,
and loops until it hits top of the list.
Once the RPC call has completed, it can then bump the sequence id counter,
and remove itself from the rpc_sequence list, and then wake up the next
sleeper.
Note that the state_owner sequence ids and lock_owner sequence ids are
all indexed to the same rpc_sequence list, so OPEN, LOCK,... requests
are all ordered w.r.t. each other.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2005-10-19 05:20:12 +08:00
|
|
|
* doing an OPEN, we assume that nfs_increment_open_seqid() will
|
2005-04-17 06:20:36 +08:00
|
|
|
* have unhashed the old state_owner for us, and that we can
|
|
|
|
* therefore safely retry using a new one. We should still warn
|
|
|
|
* the user though...
|
|
|
|
*/
|
|
|
|
if (status == -NFS4ERR_BAD_SEQID) {
|
2007-07-09 04:49:11 +08:00
|
|
|
printk(KERN_WARNING "NFS: v4 server %s "
|
|
|
|
" returned a bad sequence-id error!\n",
|
|
|
|
NFS_SERVER(dir)->nfs_client->cl_hostname);
|
2005-04-17 06:20:36 +08:00
|
|
|
exception.retry = 1;
|
|
|
|
continue;
|
|
|
|
}
|
2005-10-19 05:20:21 +08:00
|
|
|
/*
|
|
|
|
* BAD_STATEID on OPEN means that the server cancelled our
|
|
|
|
* state before it received the OPEN_CONFIRM.
|
|
|
|
* Recover by retrying the request as per the discussion
|
|
|
|
* on Page 181 of RFC3530.
|
|
|
|
*/
|
|
|
|
if (status == -NFS4ERR_BAD_STATEID) {
|
|
|
|
exception.retry = 1;
|
|
|
|
continue;
|
|
|
|
}
|
2007-07-06 07:02:21 +08:00
|
|
|
if (status == -EAGAIN) {
|
|
|
|
/* We must have found a delegation */
|
|
|
|
exception.retry = 1;
|
|
|
|
continue;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
|
|
|
|
status, &exception));
|
|
|
|
} while (exception.retry);
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2008-06-11 07:39:41 +08:00
|
|
|
static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
|
|
|
|
struct nfs_fattr *fattr, struct iattr *sattr,
|
|
|
|
struct nfs4_state *state)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-03-21 02:44:46 +08:00
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
2005-04-17 06:20:36 +08:00
|
|
|
struct nfs_setattrargs arg = {
|
2006-03-21 02:44:46 +08:00
|
|
|
.fh = NFS_FH(inode),
|
2005-04-17 06:20:36 +08:00
|
|
|
.iap = sattr,
|
|
|
|
.server = server,
|
|
|
|
.bitmask = server->attr_bitmask,
|
|
|
|
};
|
|
|
|
struct nfs_setattrres res = {
|
|
|
|
.fattr = fattr,
|
|
|
|
.server = server,
|
|
|
|
};
|
|
|
|
struct rpc_message msg = {
|
2008-06-11 07:39:41 +08:00
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
|
|
|
|
.rpc_argp = &arg,
|
|
|
|
.rpc_resp = &res,
|
|
|
|
.rpc_cred = cred,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
2006-01-03 16:55:21 +08:00
|
|
|
unsigned long timestamp = jiffies;
|
2005-08-16 23:49:44 +08:00
|
|
|
int status;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2005-10-28 10:12:38 +08:00
|
|
|
nfs_fattr_init(fattr);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-21 02:44:46 +08:00
|
|
|
if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
|
|
|
|
/* Use that stateid */
|
|
|
|
} else if (state != NULL) {
|
2010-07-02 00:49:11 +08:00
|
|
|
nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
|
2005-06-23 01:16:29 +08:00
|
|
|
} else
|
2005-04-17 06:20:36 +08:00
|
|
|
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
|
|
|
|
|
2011-03-25 01:12:24 +08:00
|
|
|
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
|
2006-01-03 16:55:21 +08:00
|
|
|
if (status == 0 && state != NULL)
|
|
|
|
renew_lease(server, timestamp);
|
2005-08-16 23:49:44 +08:00
|
|
|
return status;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2008-06-11 07:39:41 +08:00
|
|
|
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
|
|
|
|
struct nfs_fattr *fattr, struct iattr *sattr,
|
|
|
|
struct nfs4_state *state)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-03-21 02:44:46 +08:00
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
2005-04-17 06:20:36 +08:00
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
2008-06-11 07:39:41 +08:00
|
|
|
_nfs4_do_setattr(inode, cred, fattr, sattr, state),
|
2005-04-17 06:20:36 +08:00
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct nfs4_closedata {
|
2007-06-05 22:31:33 +08:00
|
|
|
struct path path;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct inode *inode;
|
|
|
|
struct nfs4_state *state;
|
|
|
|
struct nfs_closeargs arg;
|
|
|
|
struct nfs_closeres res;
|
2005-10-28 10:12:41 +08:00
|
|
|
struct nfs_fattr fattr;
|
2006-01-03 16:55:21 +08:00
|
|
|
unsigned long timestamp;
|
2011-01-06 19:36:32 +08:00
|
|
|
bool roc;
|
|
|
|
u32 roc_barrier;
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
|
2006-01-03 16:55:04 +08:00
|
|
|
static void nfs4_free_closedata(void *data)
|
2005-10-19 05:20:12 +08:00
|
|
|
{
|
2006-01-03 16:55:04 +08:00
|
|
|
struct nfs4_closedata *calldata = data;
|
|
|
|
struct nfs4_state_owner *sp = calldata->state->owner;
|
2005-10-19 05:20:12 +08:00
|
|
|
|
2011-01-06 19:36:32 +08:00
|
|
|
if (calldata->roc)
|
|
|
|
pnfs_roc_release(calldata->state->inode);
|
2005-10-19 05:20:12 +08:00
|
|
|
nfs4_put_open_state(calldata->state);
|
|
|
|
nfs_free_seqid(calldata->arg.seqid);
|
|
|
|
nfs4_put_state_owner(sp);
|
2008-05-03 04:42:45 +08:00
|
|
|
path_put(&calldata->path);
|
2005-10-19 05:20:12 +08:00
|
|
|
kfree(calldata);
|
|
|
|
}
|
|
|
|
|
2009-12-08 21:33:16 +08:00
|
|
|
static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
|
|
|
|
fmode_t fmode)
|
|
|
|
{
|
|
|
|
spin_lock(&state->owner->so_lock);
|
|
|
|
if (!(fmode & FMODE_READ))
|
|
|
|
clear_bit(NFS_O_RDONLY_STATE, &state->flags);
|
|
|
|
if (!(fmode & FMODE_WRITE))
|
|
|
|
clear_bit(NFS_O_WRONLY_STATE, &state->flags);
|
|
|
|
clear_bit(NFS_O_RDWR_STATE, &state->flags);
|
|
|
|
spin_unlock(&state->owner->so_lock);
|
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:04 +08:00
|
|
|
/*
 * rpc_call_done callback for CLOSE/OPEN_DOWNGRADE: update the open stateid
 * and lease on success, or decide whether the call must be restarted.
 */
static void nfs4_close_done(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	struct nfs_server *server = NFS_SERVER(calldata->inode);

	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
	/* hmm. we are done with the inode, and in the process of freeing
	 * the state_owner. we keep this around to process errors
	 */
	switch (task->tk_status) {
		case 0:
			if (calldata->roc)
				pnfs_roc_set_barrier(state->inode,
						     calldata->roc_barrier);
			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
			renew_lease(server, calldata->timestamp);
			nfs4_close_clear_stateid_flags(state,
					calldata->arg.fmode);
			break;
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_OLD_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_EXPIRED:
			/* A full CLOSE (fmode == 0) of a stale/expired stateid
			 * is treated as done; otherwise fall through to the
			 * generic error handler.  (intentional fallthrough) */
			if (calldata->arg.fmode == 0)
				break;
		default:
			if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
				rpc_restart_call_prepare(task);
	}
	nfs_release_seqid(calldata->arg.seqid);
	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
|
|
|
|
|
2006-01-03 16:55:05 +08:00
|
|
|
/*
 * rpc_call_prepare callback: work out whether this task should issue a
 * full CLOSE, an OPEN_DOWNGRADE, or nothing at all, based on the current
 * open counters on the state.
 */
static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	int call_close = 0;

	/* Serialize on the state owner's seqid; may put the task to sleep */
	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;

	/* Default to OPEN_DOWNGRADE keeping both modes; pared down below */
	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
	spin_lock(&state->owner->so_lock);
	/* Calculate the change in open mode */
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
			calldata->arg.fmode &= ~FMODE_READ;
		}
		if (state->n_wronly == 0) {
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
			calldata->arg.fmode &= ~FMODE_WRITE;
		}
	}
	spin_unlock(&state->owner->so_lock);

	if (!call_close) {
		/* Note: exit _without_ calling nfs4_close_done */
		task->tk_action = NULL;
		return;
	}

	/* fmode == 0 means nothing remains open: issue a full CLOSE */
	if (calldata->arg.fmode == 0) {
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
		/* pNFS return-on-close: wait for layout I/O to drain first */
		if (calldata->roc &&
		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
				     task, NULL);
			return;
		}
	}

	nfs_fattr_init(calldata->res.fattr);
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
				&calldata->arg.seq_args, &calldata->res.seq_res,
				1, task))
		return;
	rpc_call_start(task);
}
|
|
|
|
|
2006-01-03 16:55:04 +08:00
|
|
|
/* Callback table for the async CLOSE/OPEN_DOWNGRADE RPC task. */
static const struct rpc_call_ops nfs4_close_ops = {
	.rpc_call_prepare = nfs4_close_prepare,
	.rpc_call_done = nfs4_close_done,
	.rpc_release = nfs4_free_closedata,
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * It is possible for data to be read/written from a mem-mapped file
 * after the sys_close call (which hits the vfs layer as a flush).
 * This means that we can't safely call nfsv4 close on a file until
 * the inode is cleared. This in turn means that we are not good
 * NFSv4 citizens - we do not indicate to the server to update the file's
 * share state even when we are done with one of the three share
 * stateid's in the inode.
 *
 * NOTE: Caller must be holding the sp->so_owner semaphore!
 */
int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_closedata *calldata;
	struct nfs4_state_owner *sp = state->owner;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_close_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	calldata = kzalloc(sizeof(*calldata), gfp_mask);
	if (calldata == NULL)
		goto out;
	calldata->inode = state->inode;
	calldata->state = state;
	calldata->arg.fh = NFS_FH(state->inode);
	calldata->arg.stateid = &state->open_stateid;
	/* Serialization for the sequence id */
	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
	if (calldata->arg.seqid == NULL)
		goto out_free_calldata;
	calldata->arg.fmode = 0;
	calldata->arg.bitmask = server->cache_consistency_bitmask;
	calldata->res.fattr = &calldata->fattr;
	calldata->res.seqid = calldata->arg.seqid;
	calldata->res.server = server;
	calldata->roc = roc;
	/* Pin the path; released in nfs4_free_closedata() */
	path_get(path);
	calldata->path = *path;

	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	/* On success, ownership of calldata/state/path passes to the task;
	 * cleanup happens in nfs4_free_closedata() via .rpc_release. */
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = 0;
	if (wait)
		status = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return status;
out_free_calldata:
	kfree(calldata);
out:
	/* Error path: undo the references the caller handed us */
	if (roc)
		pnfs_roc_release(state->inode);
	nfs4_put_open_state(state);
	nfs4_put_state_owner(sp);
	return status;
}
|
|
|
|
|
2010-09-17 22:56:51 +08:00
|
|
|
/*
 * Atomic open: perform the NFSv4 OPEN and record the resulting open state
 * on the context.  Returns a referenced inode (via igrab) or an ERR_PTR.
 */
static struct inode *
nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
{
	struct nfs4_state *state;

	/* Protect against concurrent sillydeletes */
	state = nfs4_do_open(dir, &ctx->path, ctx->mode, open_flags, attr, ctx->cred);
	if (IS_ERR(state))
		return ERR_CAST(state);
	ctx->state = state;
	return igrab(state->inode);
}
|
|
|
|
|
2009-12-04 04:54:02 +08:00
|
|
|
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
|
2009-03-20 03:35:50 +08:00
|
|
|
{
|
|
|
|
if (ctx->state == NULL)
|
|
|
|
return;
|
|
|
|
if (is_sync)
|
|
|
|
nfs4_close_sync(&ctx->path, ctx->state, ctx->mode);
|
|
|
|
else
|
|
|
|
nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
 * Issue a SERVER_CAPS compound against @fhandle and translate the returned
 * attribute bitmasks into NFS_CAP_* flags and the server's cache-consistency
 * bitmask.  Returns 0 on success or a negative NFS4ERR_* value.
 */
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	struct nfs4_server_caps_arg args = {
		.fhandle = fhandle,
	};
	struct nfs4_server_caps_res res = {};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (status == 0) {
		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
		/* Reset all caps we are about to re-derive from the reply */
		server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
				NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
				NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
				NFS_CAP_CTIME|NFS_CAP_MTIME);
		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
			server->caps |= NFS_CAP_ACLS;
		if (res.has_links != 0)
			server->caps |= NFS_CAP_HARDLINKS;
		if (res.has_symlinks != 0)
			server->caps |= NFS_CAP_SYMLINKS;
		if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
			server->caps |= NFS_CAP_FILEID;
		if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
			server->caps |= NFS_CAP_MODE;
		if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
			server->caps |= NFS_CAP_NLINK;
		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
			server->caps |= NFS_CAP_OWNER;
		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
			server->caps |= NFS_CAP_OWNER_GROUP;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
			server->caps |= NFS_CAP_ATIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
			server->caps |= NFS_CAP_CTIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
			server->caps |= NFS_CAP_MTIME;

		/* Cache-consistency attrs: change/size + ctime/mtime only */
		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
		server->acl_bitmask = res.acl_bitmask;
	}

	return status;
}
|
|
|
|
|
2006-06-09 21:34:19 +08:00
|
|
|
int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_server_capabilities(server, fhandle),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Issue a LOOKUP_ROOT compound: fetch the filehandle and attributes of the
 * server's pseudo-root.  Fills in @fhandle and info->fattr on success.
 */
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_lookup_root_arg args = {
		.bitmask = nfs4_fattr_bitmap,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = info->fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(info->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
|
|
|
|
struct nfs_fsinfo *info)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_lookup_root(server, fhandle, info),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-03-25 01:12:31 +08:00
|
|
|
/*
 * Try looking up the server root using the given RPC auth @flavor.
 * NOTE(review): rpcauth_create() presumably switches server->client over to
 * the new flavor in place, which is why its return value is only checked
 * for failure here — confirm against the sunrpc auth implementation.
 * Returns -EIO if the flavor's auth module cannot be created.
 */
static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, rpc_authflavor_t flavor)
{
	struct rpc_auth *auth;
	int ret;

	auth = rpcauth_create(flavor, server->client);
	if (!auth) {
		ret = -EIO;
		goto out;
	}
	ret = nfs4_lookup_root(server, fhandle, info);
out:
	return ret;
}
|
|
|
|
|
NFS: Share NFS superblocks per-protocol per-server per-FSID
The attached patch makes NFS share superblocks between mounts from the same
server and FSID over the same protocol.
It does this by creating each superblock with a false root and returning the
real root dentry in the vfsmount presented by get_sb(). The root dentry set
starts off as an anonymous dentry if we don't already have the dentry for its
inode, otherwise it simply returns the dentry we already have.
We may thus end up with several trees of dentries in the superblock, and if at
some later point one of anonymous tree roots is discovered by normal filesystem
activity to be located in another tree within the superblock, the anonymous
root is named and materialises attached to the second tree at the appropriate
point.
Why do it this way? Why not pass an extra argument to the mount() syscall to
indicate the subpath and then pathwalk from the server root to the desired
directory? You can't guarantee this will work for two reasons:
(1) The root and intervening nodes may not be accessible to the client.
With NFS2 and NFS3, for instance, mountd is called on the server to get
the filehandle for the tip of a path. mountd won't give us handles for
anything we don't have permission to access, and so we can't set up NFS
inodes for such nodes, and so can't easily set up dentries (we'd have to
have ghost inodes or something).
With this patch we don't actually create dentries until we get handles
from the server that we can use to set up their inodes, and we don't
actually bind them into the tree until we know for sure where they go.
(2) Inaccessible symbolic links.
If we're asked to mount two exports from the server, eg:
mount warthog:/warthog/aaa/xxx /mmm
mount warthog:/warthog/bbb/yyy /nnn
We may not be able to access anything nearer the root than xxx and yyy,
but we may find out later that /mmm/www/yyy, say, is actually the same
directory as the one mounted on /nnn. What we might then find out, for
example, is that /warthog/bbb was actually a symbolic link to
/warthog/aaa/xxx/www, but we can't actually determine that by talking to
the server until /warthog is made available by NFS.
This would lead to having constructed an erroneous dentry tree which we
can't easily fix. We can end up with a dentry marked as a directory when
it should actually be a symlink, or we could end up with an apparently
hardlinked directory.
With this patch we need not make assumptions about the type of a dentry
for which we can't retrieve information, nor need we assume we know its
place in the grand scheme of things until we actually see that place.
This patch reduces the possibility of aliasing in the inode and page caches for
inodes that may be accessed by more than one NFS export. It also reduces the
number of superblocks required for NFS where there are many NFS exports being
used from a server (home directory server + autofs for example).
This in turn makes it simpler to do local caching of network filesystems, as it
can then be guaranteed that there won't be links from multiple inodes in
separate superblocks to the same cache file.
Obviously, cache aliasing between different levels of NFS protocol could still
be a problem, but at least that gives us another key to use when indexing the
cache.
This patch makes the following changes:
(1) The server record construction/destruction has been abstracted out into
its own set of functions to make things easier to get right. These have
been moved into fs/nfs/client.c.
All the code in fs/nfs/client.c has to do with the management of
connections to servers, and doesn't touch superblocks in any way; the
remaining code in fs/nfs/super.c has to do with VFS superblock management.
(2) The sequence of events undertaken by NFS mount is now reordered:
(a) A volume representation (struct nfs_server) is allocated.
(b) A server representation (struct nfs_client) is acquired. This may be
allocated or shared, and is keyed on server address, port and NFS
version.
(c) If allocated, the client representation is initialised. The state
member variable of nfs_client is used to prevent a race during
initialisation from two mounts.
(d) For NFS4 a simple pathwalk is performed, walking from FH to FH to find
the root filehandle for the mount (fs/nfs/getroot.c). For NFS2/3 we
are given the root FH in advance.
(e) The volume FSID is probed for on the root FH.
(f) The volume representation is initialised from the FSINFO record
retrieved on the root FH.
(g) sget() is called to acquire a superblock. This may be allocated or
shared, keyed on client pointer and FSID.
(h) If allocated, the superblock is initialised.
(i) If the superblock is shared, then the new nfs_server record is
discarded.
(j) The root dentry for this mount is looked up from the root FH.
(k) The root dentry for this mount is assigned to the vfsmount.
(3) nfs_readdir_lookup() creates dentries for each of the entries readdir()
returns; this function now attaches disconnected trees from alternate
roots that happen to be discovered attached to a directory being read (in
the same way nfs_lookup() is made to do for lookup ops).
The new d_materialise_unique() function is now used to do this, thus
permitting the whole thing to be done under one set of locks, and thus
avoiding any race between mount and lookup operations on the same
directory.
(4) The client management code uses a new debug facility: NFSDBG_CLIENT which
is set by echoing 1024 to /proc/net/sunrpc/nfs_debug.
(5) Clone mounts are now called xdev mounts.
(6) Use the dentry passed to the statfs() op as the handle for retrieving fs
statistics rather than the root dentry of the superblock (which is now a
dummy).
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2006-08-23 08:06:13 +08:00
|
|
|
/*
 * get the file handle for the "/" directory on the server
 *
 * Tries AUTH_UNIX first, then every GSS pseudoflavor the kernel knows,
 * and finally AUTH_NULL, stopping at the first flavor the server does not
 * reject with -EPERM.  On success, probes server capabilities and fsinfo.
 */
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fsinfo *info)
{
	int i, len, status = 0;
	/* +2: AUTH_UNIX in slot 0 and AUTH_NULL appended at the end */
	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS + 2];

	flav_array[0] = RPC_AUTH_UNIX;
	len = gss_mech_list_pseudoflavors(&flav_array[1]);
	flav_array[1+len] = RPC_AUTH_NULL;
	len += 2;

	for (i = 0; i < len; i++) {
		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
		/* Only -EPERM means "wrong flavor, try the next one" */
		if (status != -EPERM)
			break;
	}
	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);
	return nfs4_map_errors(status);
}
|
|
|
|
|
2006-06-09 21:34:29 +08:00
|
|
|
/*
 * Get locations and (maybe) other attributes of a referral.
 * Note that we'll actually follow the referral later when
 * we detect fsid mismatch in inode revalidation
 */
static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_fs_locations(dir, name, locations, page);
	if (status != 0)
		goto out;
	/* Make sure server returned a different fsid for the referral */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name);
		status = -EIO;
		goto out;
	}

	/* Hand the referral's attributes back to the caller, marked as a
	 * referral; the filehandle is deliberately zeroed. */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
	if (!fattr->mode)
		fattr->mode = S_IFDIR;
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Issue a single GETATTR compound for @fhandle using the server's full
 * attribute bitmask; fills in @fattr on success.
 */
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_proc_getattr(server, fhandle, fattr),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we
 * need to in the size_change case to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in nfsv4, it is
 * possible that we opened a different file by the same
 * name. We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
 */
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		  struct iattr *sattr)
{
	struct inode *inode = dentry->d_inode;
	struct rpc_cred *cred = NULL;
	struct nfs4_state *state = NULL;
	int status;

	nfs_fattr_init(fattr);

	/* Search for an existing open(O_WRITE) file */
	if (sattr->ia_valid & ATTR_FILE) {
		struct nfs_open_context *ctx;

		ctx = nfs_file_open_context(sattr->ia_file);
		if (ctx) {
			/* Reuse the open file's credential and stateid */
			cred = ctx->cred;
			state = ctx->state;
		}
	}

	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
	if (status == 0)
		nfs_setattr_update_inode(inode, sattr);
	return status;
}
|
|
|
|
|
2011-03-25 01:12:24 +08:00
|
|
|
/*
 * Issue a single NFSv4 LOOKUP of @name relative to the directory filehandle
 * @dirfh, returning the child's filehandle in @fhandle and its attributes in
 * @fattr. No exception/retry handling here — callers wrap this in a
 * nfs4_handle_exception() loop. Returns 0 or a negative error/NFS4ERR code.
 */
static int _nfs4_proc_lookupfh(struct rpc_clnt *clnt, struct nfs_server *server,
		const struct nfs_fh *dirfh, const struct qstr *name,
		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	int status;
	struct nfs4_lookup_arg args = {
		.bitmask = server->attr_bitmask,
		.dir_fh = dirfh,
		.name = name,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	/* Clear out any stale attribute data before decoding the reply */
	nfs_fattr_init(fattr);

	dprintk("NFS call lookupfh %s\n", name->name);
	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("NFS reply lookupfh: %d\n", status);
	return status;
}
|
|
|
|
|
|
|
|
/*
 * Retry wrapper around _nfs4_proc_lookupfh(). NFS4ERR_MOVED (a migrated
 * filesystem) is special-cased to -EREMOTE instead of being fed to the
 * generic exception handler; other errors go through
 * nfs4_handle_exception(), which may request a retry.
 */
static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
			      struct qstr *name, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_lookupfh(server->client, server, dirfh, name, fhandle, fattr);
		/* FIXME: !!!! */
		if (err == -NFS4ERR_MOVED) {
			err = -EREMOTE;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
2011-03-25 01:12:24 +08:00
|
|
|
/*
 * LOOKUP @name in directory inode @dir. If the server reports
 * NFS4ERR_MOVED the object lives on another server, so chase the
 * referral via nfs4_get_referral() instead of failing outright.
 */
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
		const struct qstr *name, struct nfs_fh *fhandle,
		struct nfs_fattr *fattr)
{
	int status;

	dprintk("NFS call lookup %s\n", name->name);
	status = _nfs4_proc_lookupfh(clnt, NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
	if (status == -NFS4ERR_MOVED)
		status = nfs4_get_referral(dir, name, fattr, fhandle);
	dprintk("NFS reply lookup: %d\n", status);
	return status;
}
|
|
|
|
|
2011-03-25 01:12:30 +08:00
|
|
|
/*
 * Synthesize placeholder attributes for a crossed security-mountpoint:
 * zero the filehandle and fabricate a minimal directory fattr (mode
 * r-x for all, nlink 2, fsid.major 1) flagged as a mountpoint, so the
 * VFS can instantiate a crossing point without real attribute data.
 */
void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh)
{
	memset(fh, 0, sizeof(struct nfs_fh));
	fattr->fsid.major = 1;
	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT;
	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
	fattr->nlink = 2;
}
|
|
|
|
|
2011-03-25 01:12:24 +08:00
|
|
|
/*
 * Public LOOKUP entry point with exception/retry handling. An -EPERM
 * result is treated as a security-flavor boundary: fake up mountpoint
 * attributes via nfs_fixup_secinfo_attributes() rather than failing.
 */
static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_lookup(clnt, dir, name, fhandle, fattr),
				&exception);
		if (err == -EPERM)
			nfs_fixup_secinfo_attributes(fattr, fhandle);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Single NFSv4 ACCESS call: translate the VFS MAY_* mask in @entry->mask
 * into NFS4_ACCESS_* bits (the mapping differs for directories vs.
 * regular files), ask the server, and translate the granted bits back
 * into MAY_* form in entry->mask. The returned fattr, if any, is used
 * to refresh the inode's cached attributes.
 * Returns 0 on success or a negative error (-ENOMEM if the temporary
 * fattr cannot be allocated).
 */
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_accessargs args = {
		.fh = NFS_FH(inode),
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_accessres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = entry->cred,
	};
	int mode = entry->mask;
	int status;

	/*
	 * Determine which access bits we want to ask for...
	 */
	if (mode & MAY_READ)
		args.access |= NFS4_ACCESS_READ;
	if (S_ISDIR(inode->i_mode)) {
		/* For directories, write means modify/extend/delete entries
		 * and exec means lookup */
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_LOOKUP;
	} else {
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_EXECUTE;
	}

	res.fattr = nfs_alloc_fattr();
	if (res.fattr == NULL)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (!status) {
		/* Map granted NFS4_ACCESS_* bits back to MAY_* */
		entry->mask = 0;
		if (res.access & NFS4_ACCESS_READ)
			entry->mask |= MAY_READ;
		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
			entry->mask |= MAY_WRITE;
		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
			entry->mask |= MAY_EXEC;
		nfs_refresh_inode(inode, res.fattr);
	}
	nfs_free_fattr(res.fattr);
	return status;
}
|
|
|
|
|
|
|
|
static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode),
|
|
|
|
_nfs4_proc_access(inode, entry),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TODO: For the time being, we don't try to get any attributes
|
|
|
|
* along with any of the zero-copy operations READ, READDIR,
|
|
|
|
* READLINK, WRITE.
|
|
|
|
*
|
|
|
|
* In the case of the first three, we want to put the GETATTR
|
|
|
|
* after the read-type operation -- this is because it is hard
|
|
|
|
* to predict the length of a GETATTR response in v4, and thus
|
|
|
|
* align the READ data correctly. This means that the GETATTR
|
|
|
|
* may end up partially falling into the page cache, and we should
|
|
|
|
* shift it into the 'tail' of the xdr_buf before processing.
|
|
|
|
* To do this efficiently, we need to know the total length
|
|
|
|
* of data received, which doesn't seem to be available outside
|
|
|
|
* of the RPC layer.
|
|
|
|
*
|
|
|
|
* In the case of WRITE, we also want to put the GETATTR after
|
|
|
|
* the operation -- in this case because we want to make sure
|
|
|
|
* we get the post-operation mtime and size. This means that
|
|
|
|
* we can't use xdr_encode_pages() as written: we need a variant
|
|
|
|
* of it which would leave room in the 'tail' iovec.
|
|
|
|
*
|
|
|
|
* Both of these changes to the XDR layer would in fact be quite
|
|
|
|
* minor, but I decided to leave them for a subsequent patch.
|
|
|
|
*/
|
|
|
|
/*
 * Single NFSv4 READLINK call: read up to @pglen bytes of the symlink
 * target into @page starting at offset @pgbase. Returns the raw result
 * of nfs4_call_sync().
 */
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs4_readlink args = {
		.fh       = NFS_FH(inode),
		.pgbase	  = pgbase,
		.pglen    = pglen,
		.pages    = &page,
	};
	struct nfs4_readlink_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
|
|
|
|
unsigned int pgbase, unsigned int pglen)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode),
|
|
|
|
_nfs4_proc_readlink(inode, page, pgbase, pglen),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Got race?
|
|
|
|
* We will need to arrange for the VFS layer to provide an atomic open.
|
|
|
|
* Until then, this create/open method is prone to inefficiency and race
|
|
|
|
* conditions due to the lookup, create, and open VFS calls from sys_open()
|
|
|
|
* placed on the wire.
|
|
|
|
*
|
|
|
|
* Given the above sorry state of affairs, I'm simply sending an OPEN.
|
|
|
|
* The file will be opened again in the subsequent VFS open call
|
|
|
|
* (nfs4_proc_file_open).
|
|
|
|
*
|
|
|
|
* The open for read will just hang around to be used by any process that
|
|
|
|
* opens the file O_RDONLY. This will all be resolved with the VFS changes.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Create and open a regular file via NFSv4 OPEN (see the race discussion
 * in the comment above). When an open context is supplied, its cred,
 * path and mode drive the OPEN and the resulting state is stashed in
 * ctx->state for the subsequent file open; without a context the state
 * is closed again immediately (the OPEN was only needed for creation).
 * The dentry is dropped and re-added so it is bound to the inode the
 * server actually created.
 */
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
		 int flags, struct nfs_open_context *ctx)
{
	struct path my_path = {
		.dentry = dentry,
	};
	struct path *path = &my_path;
	struct nfs4_state *state;
	struct rpc_cred *cred = NULL;
	fmode_t fmode = 0;
	int status = 0;

	if (ctx != NULL) {
		cred = ctx->cred;
		path = &ctx->path;
		fmode = ctx->mode;
	}
	/* Apply the process umask before sending the mode to the server */
	sattr->ia_mode &= ~current_umask();
	state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
	d_drop(dentry);
	if (IS_ERR(state)) {
		status = PTR_ERR(state);
		goto out;
	}
	d_add(dentry, igrab(state->inode));
	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	if (ctx != NULL)
		ctx->state = state;
	else
		nfs4_close_sync(path, state, fmode);
out:
	return status;
}
|
|
|
|
|
|
|
|
/*
 * Single NFSv4 REMOVE of @name from directory @dir. On success the
 * directory's change attribute and cached attributes are updated from
 * the reply. Returns 0, -ENOMEM if the temporary fattr allocation
 * fails, or the RPC error.
 */
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs args = {
		.fh = NFS_FH(dir),
		.name.len = name->len,
		.name.name = name->name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_removeres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.dir_attr = nfs_alloc_fattr();
	if (res.dir_attr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(dir, res.dir_attr);
	}
	nfs_free_fattr(res.dir_attr);
out:
	return status;
}
|
|
|
|
|
|
|
|
static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(dir),
|
|
|
|
_nfs4_proc_remove(dir, name),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2007-07-15 03:39:58 +08:00
|
|
|
/*
 * Prepare an async unlink rpc_message: fill in the NFSv4-specific
 * bitmask and response server, and select the REMOVE procedure.
 */
static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs *args = msg->rpc_argp;
	struct nfs_removeres *res = msg->rpc_resp;

	args->bitmask = server->cache_consistency_bitmask;
	res->server = server;
	/* NOTE(review): clearing sr_slot appears to mark the sequence result
	 * as not holding a session slot before the call is (re)issued —
	 * confirm against the NFSv4.1 session code. */
	res->seq_res.sr_slot = NULL;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
}
|
|
|
|
|
2007-07-15 03:39:58 +08:00
|
|
|
/*
 * Completion callback for an async REMOVE. Returns 0 if the task must
 * be restarted (sequence not done, or the async error handler asked
 * for a retry), 1 when the result has been consumed; on success the
 * directory's change attribute and cached attributes are updated.
 */
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
	struct nfs_removeres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;
	update_changeattr(dir, &res->cinfo);
	nfs_post_op_update_inode(dir, res->dir_attr);
	return 1;
}
|
|
|
|
|
2010-09-18 05:31:57 +08:00
|
|
|
/*
 * Prepare an async rename rpc_message: select the RENAME procedure and
 * fill in the attribute bitmask and response server.
 */
static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_renameargs *arg = msg->rpc_argp;
	struct nfs_renameres *res = msg->rpc_resp;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
	arg->bitmask = server->attr_bitmask;
	res->server = server;
}
|
|
|
|
|
|
|
|
/*
 * Completion callback for an async RENAME. Returns 0 to restart the
 * task (sequence not done, or retry requested), 1 once the result is
 * consumed; on success both directories get their change attributes
 * and cached attributes updated.
 */
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
				 struct inode *new_dir)
{
	struct nfs_renameres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;

	update_changeattr(old_dir, &res->old_cinfo);
	nfs_post_op_update_inode(old_dir, res->old_fattr);
	update_changeattr(new_dir, &res->new_cinfo);
	nfs_post_op_update_inode(new_dir, res->new_fattr);
	return 1;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Single synchronous NFSv4 RENAME of @old_name in @old_dir to
 * @new_name in @new_dir. On success both directories' change
 * attributes and cached attributes are refreshed from the reply.
 * Returns 0, -ENOMEM if either temporary fattr fails to allocate,
 * or the RPC error.
 */
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_server *server = NFS_SERVER(old_dir);
	struct nfs_renameargs arg = {
		.old_dir = NFS_FH(old_dir),
		.new_dir = NFS_FH(new_dir),
		.old_name = old_name,
		.new_name = new_name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_renameres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.old_fattr = nfs_alloc_fattr();
	res.new_fattr = nfs_alloc_fattr();
	if (res.old_fattr == NULL || res.new_fattr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(old_dir, &res.old_cinfo);
		nfs_post_op_update_inode(old_dir, res.old_fattr);
		update_changeattr(new_dir, &res.new_cinfo);
		nfs_post_op_update_inode(new_dir, res.new_fattr);
	}
out:
	/* nfs_free_fattr() copes with NULL, so partial allocation is fine */
	nfs_free_fattr(res.new_fattr);
	nfs_free_fattr(res.old_fattr);
	return status;
}
|
|
|
|
|
|
|
|
/*
 * RENAME entry point with the standard NFSv4 exception/retry loop.
 */
static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(old_dir),
				_nfs4_proc_rename(old_dir, old_name,
					new_dir, new_name),
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Single NFSv4 LINK creating directory entry @name in @dir for @inode.
 * On success the directory's change attribute and both the directory's
 * and the linked inode's cached attributes are refreshed. Returns 0,
 * -ENOMEM on fattr allocation failure, or the RPC error.
 */
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_link_arg arg = {
		.fh     = NFS_FH(inode),
		.dir_fh = NFS_FH(dir),
		.name   = name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_link_res res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.fattr = nfs_alloc_fattr();
	res.dir_attr = nfs_alloc_fattr();
	if (res.fattr == NULL || res.dir_attr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(dir, res.dir_attr);
		nfs_post_op_update_inode(inode, res.fattr);
	}
out:
	/* nfs_free_fattr() copes with NULL, so partial allocation is fine */
	nfs_free_fattr(res.dir_attr);
	nfs_free_fattr(res.fattr);
	return status;
}
|
|
|
|
|
|
|
|
/*
 * LINK entry point with the standard NFSv4 exception/retry loop.
 */
static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(inode),
				_nfs4_proc_link(inode, dir, name),
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
2008-06-21 03:35:32 +08:00
|
|
|
/*
 * Bundle of everything a synchronous NFSv4 CREATE needs: the
 * rpc_message, its argument/result structures, and inline storage
 * for the new object's filehandle and the fattrs the reply decodes
 * into. Allocated by nfs4_alloc_createdata(), freed by
 * nfs4_free_createdata().
 */
struct nfs4_createdata {
	struct rpc_message msg;		/* points at arg/res below */
	struct nfs4_create_arg arg;
	struct nfs4_create_res res;
	struct nfs_fh fh;		/* new object's filehandle */
	struct nfs_fattr fattr;		/* new object's attributes */
	struct nfs_fattr dir_fattr;	/* parent directory's attributes */
};
|
|
|
|
|
|
|
|
/*
 * Allocate and pre-wire an nfs4_createdata for creating @name of type
 * @ftype in @dir with attributes @sattr. The msg/arg/res pointers are
 * linked to the embedded structures and both fattrs initialized.
 * Returns NULL on allocation failure. Callers may still override
 * msg.rpc_proc and arg fields (e.g. symlink/mknod variants do).
 */
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
		struct qstr *name, struct iattr *sattr, u32 ftype)
{
	struct nfs4_createdata *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data != NULL) {
		struct nfs_server *server = NFS_SERVER(dir);

		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
		data->msg.rpc_argp = &data->arg;
		data->msg.rpc_resp = &data->res;
		data->arg.dir_fh = NFS_FH(dir);
		data->arg.server = server;
		data->arg.name = name;
		data->arg.attrs = sattr;
		data->arg.ftype = ftype;
		data->arg.bitmask = server->attr_bitmask;
		data->res.server = server;
		data->res.fh = &data->fh;
		data->res.fattr = &data->fattr;
		data->res.dir_fattr = &data->dir_fattr;
		nfs_fattr_init(data->res.fattr);
		nfs_fattr_init(data->res.dir_fattr);
	}
	return data;
}
|
|
|
|
|
|
|
|
/*
 * Fire the prepared CREATE rpc_message synchronously. On success the
 * parent directory's change attribute/attrs are updated and @dentry is
 * instantiated with the new object's filehandle and attributes.
 */
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
				    &data->arg.seq_args, &data->res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &data->res.dir_cinfo);
		nfs_post_op_update_inode(dir, data->res.dir_fattr);
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	}
	return status;
}
|
|
|
|
|
|
|
|
/* Release an nfs4_createdata allocated by nfs4_alloc_createdata(). */
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
	kfree(data);
}
|
|
|
|
|
2006-08-23 08:06:22 +08:00
|
|
|
/*
 * Single NFSv4 CREATE(NF4LNK): make a symlink at @dentry whose target
 * (of @len bytes) lives in @page. Rejects targets longer than
 * NFS4_MAXPATHLEN with -ENAMETOOLONG; -ENOMEM on allocation failure.
 */
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct page *page, unsigned int len, struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENAMETOOLONG;

	if (len > NFS4_MAXPATHLEN)
		goto out;

	status = -ENOMEM;
	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
	if (data == NULL)
		goto out;

	/* Symlinks use the SYMLINK procedure and carry the target inline */
	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
	data->arg.u.symlink.pages = &page;
	data->arg.u.symlink.len = len;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
|
|
|
|
|
2006-08-23 08:06:22 +08:00
|
|
|
/*
 * SYMLINK entry point with the standard NFSv4 exception/retry loop.
 */
static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct page *page, unsigned int len, struct iattr *sattr)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_symlink(dir, dentry, page,
							len, sattr),
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Single NFSv4 CREATE(NF4DIR): make a directory at @dentry with
 * attributes @sattr. Returns 0, -ENOMEM, or the RPC error.
 */
static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENOMEM;

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
	if (data == NULL)
		goto out;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
|
|
|
|
|
|
|
|
/*
 * MKDIR entry point: apply the process umask, then run _nfs4_proc_mkdir()
 * under the standard NFSv4 exception/retry loop.
 */
static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr)
{
	struct nfs4_exception exception = { };
	int err;

	sattr->ia_mode &= ~current_umask();
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_mkdir(dir, dentry, sattr),
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Single NFSv4 READDIR: fill @pages with up to @count bytes of entries
 * starting at @cookie; @plus requests the extra attributes used to
 * emulate READDIRPLUS. The directory's cookie verifier is saved from
 * the reply, and a non-negative return is adjusted by args.pgbase
 * (hedge: presumably to report the total decoded length including the
 * page offset — confirm against the XDR decoder). The directory's
 * atime cache is invalidated since READDIR touches it on the server.
 */
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct inode *dir = dentry->d_inode;
	struct nfs4_readdir_arg args = {
		.fh = NFS_FH(dir),
		.pages = pages,
		.pgbase = 0,
		.count = count,
		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
		.plus = plus,
	};
	struct nfs4_readdir_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
			dentry->d_parent->d_name.name,
			dentry->d_name.name,
			(unsigned long long)cookie);
	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
	res.pgbase = args.pgbase;
	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
	if (status >= 0) {
		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
		status += args.pgbase;
	}

	nfs_invalidate_atime(dir);

	dprintk("%s: returns %d\n", __func__, status);
	return status;
}
|
|
|
|
|
|
|
|
/*
 * READDIR entry point with the standard NFSv4 exception/retry loop.
 */
static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
		u64 cookie, struct page **pages, unsigned int count, int plus)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
				_nfs4_proc_readdir(dentry, cred, cookie,
					pages, count, plus),
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Single NFSv4 CREATE for a special file (fifo, block, char or socket)
 * at @dentry. The mode in @sattr selects the NFSv4 file type; block and
 * char devices additionally carry major/minor from @rdev. The data is
 * allocated as NF4SOCK and only overridden for the other types, so a
 * socket mode falls through with the default.
 */
static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, dev_t rdev)
{
	struct nfs4_createdata *data;
	int mode = sattr->ia_mode;
	int status = -ENOMEM;

	/* Callers must supply a mode naming one of the special types */
	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
	if (data == NULL)
		goto out;

	if (S_ISFIFO(mode))
		data->arg.ftype = NF4FIFO;
	else if (S_ISBLK(mode)) {
		data->arg.ftype = NF4BLK;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	}
	else if (S_ISCHR(mode)) {
		data->arg.ftype = NF4CHR;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	}

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
|
|
|
|
|
|
|
|
/*
 * MKNOD entry point: apply the process umask, then run _nfs4_proc_mknod()
 * under the standard NFSv4 exception/retry loop.
 */
static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, dev_t rdev)
{
	struct nfs4_exception exception = { };
	int err;

	sattr->ia_mode &= ~current_umask();
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Single NFSv4 STATFS (filesystem space/usage attributes) for the
 * filesystem containing @fhandle; results decode into @fsstat.
 */
static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsstat *fsstat)
{
	struct nfs4_statfs_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_statfs_res res = {
		.fsstat = fsstat,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fsstat->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_proc_statfs(server, fhandle, fsstat),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Single NFSv4 FSINFO (static filesystem information) for the
 * filesystem containing @fhandle; results decode into @fsinfo.
 */
static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *fsinfo)
{
	struct nfs4_fsinfo_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_fsinfo_res res = {
		.fsinfo = fsinfo,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_do_fsinfo(server, fhandle, fsinfo),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * FSINFO entry point: invalidate the cached attributes before issuing
 * the request so the reply repopulates them from scratch.
 */
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	nfs_fattr_init(fsinfo->fattr);
	return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
|
|
|
|
|
|
|
|
/*
 * One-shot PATHCONF request. If the server advertises none of the
 * pathconf attributes we care about, succeed with a zeroed result
 * instead of issuing a pointless RPC.
 */
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_pathconf_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_pathconf_res res = {
		.pathconf = pathconf,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	/* None of the pathconf attributes are mandatory to implement */
	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
		memset(pathconf, 0, sizeof(*pathconf));
		return 0;
	}

	nfs_fattr_init(pathconf->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
|
|
|
|
|
|
|
|
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
|
|
|
|
struct nfs_pathconf *pathconf)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_proc_pathconf(server, fhandle, pathconf),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-03-01 09:34:20 +08:00
|
|
|
/*
 * Per-protocol completion for READ. Returns -EAGAIN (after restarting
 * the task) when the error handler decides the call must be retried,
 * otherwise 0.
 */
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
		nfs_restart_rpc(task, server->nfs_client);
		return -EAGAIN;
	}

	/* A successful READ updates atime on the server side */
	nfs_invalidate_atime(data->inode);
	if (task->tk_status > 0)
		/* Any succesful reply proves the lease was alive at call time */
		renew_lease(server, data->timestamp);
	return 0;
}
|
|
|
|
|
2011-03-01 09:34:20 +08:00
|
|
|
/*
 * rpc_call_done for READ: complete session sequencing first, then hand
 * off to the per-layout callback (MDS or pNFS data server).
 */
static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;

	return data->read_done_cb(task, data);
}
|
|
|
|
|
2007-07-15 03:40:00 +08:00
|
|
|
/*
 * Prepare an NFSv4 READ: record the call timestamp (used for lease
 * renewal on completion) and select the v4 completion callback.
 */
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
	data->timestamp   = jiffies;
	data->read_done_cb = nfs4_read_done_cb;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
}
|
|
|
|
|
2011-03-01 09:34:20 +08:00
|
|
|
/* Reset the nfs_read_data to send the read to the MDS. */
void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	/* Drop the pNFS layout segment; the retry goes to the MDS */
	put_lseg(data->lseg);
	data->lseg = NULL;
	/* offsets will differ in the dense stripe case */
	data->args.offset = data->mds_offset;
	data->ds_clp = NULL;
	data->args.fh = NFS_FH(data->inode);
	data->read_done_cb = nfs4_read_done_cb;
	task->tk_ops = data->mds_ops;
	/* Redirect the task's transport at the MDS connection */
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_read);
|
|
|
|
|
2011-03-03 23:13:42 +08:00
|
|
|
/*
 * Per-protocol completion for WRITE: retry via the async error handler
 * when required; on success renew the lease and apply the post-op
 * attributes with forced weak cache consistency.
 */
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
		nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
		return -EAGAIN;
	}
	if (task->tk_status >= 0) {
		renew_lease(NFS_SERVER(inode), data->timestamp);
		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
	}
	return 0;
}
|
|
|
|
|
2011-03-03 23:13:42 +08:00
|
|
|
/*
 * rpc_call_done for WRITE: finish session sequencing, then dispatch to
 * the per-layout write completion callback.
 */
static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb(task, data);
}
|
|
|
|
|
2011-03-03 23:13:47 +08:00
|
|
|
/* Reset the nfs_write_data to send the write to the MDS. */
void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	/* Drop the pNFS layout segment; the retry goes to the MDS */
	put_lseg(data->lseg);
	data->lseg          = NULL;
	data->ds_clp        = NULL;
	data->write_done_cb = nfs4_write_done_cb;
	data->args.fh       = NFS_FH(data->inode);
	data->args.bitmask  = data->res.server->cache_consistency_bitmask;
	/* offsets can differ between DS and MDS views of the file */
	data->args.offset   = data->mds_offset;
	data->res.fattr     = &data->fattr;
	task->tk_ops        = data->mds_ops;
	/* Redirect the task's transport at the MDS connection */
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_write);
|
|
|
|
|
2007-07-15 03:40:00 +08:00
|
|
|
/*
 * Prepare an NFSv4 WRITE. For pNFS writes (data->lseg set) the data
 * server cannot return MDS attributes, so no attribute bitmask or
 * fattr is requested; MDS writes request the cache-consistency set.
 */
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		/* pNFS: no post-op attributes from the data server */
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;
	/* Don't override a callback the pNFS layout driver installed */
	if (!data->write_done_cb)
		data->write_done_cb = nfs4_write_done_cb;
	data->res.server = server;
	/* Timestamp used for lease renewal on successful completion */
	data->timestamp   = jiffies;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
}
|
|
|
|
|
2011-03-23 21:27:46 +08:00
|
|
|
/*
 * Per-protocol completion for COMMIT: retry via the async error
 * handler when required, otherwise merge any returned attributes.
 */
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
		nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
		return -EAGAIN;
	}
	nfs_refresh_inode(inode, data->res.fattr);
	return 0;
}
|
|
|
|
|
2011-03-23 21:27:46 +08:00
|
|
|
/*
 * rpc_call_done for COMMIT: finish session sequencing, then dispatch
 * to the per-layout commit completion callback.
 */
static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb(task, data);
}
|
|
|
|
|
2007-07-15 03:40:00 +08:00
|
|
|
/*
 * Prepare an NFSv4 COMMIT. Mirrors nfs4_proc_write_setup: pNFS
 * commits (data->lseg set) skip the attribute request, MDS commits
 * ask for the cache-consistency attribute set.
 */
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		/* pNFS: no post-op attributes from the data server */
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;
	/* Don't override a callback the pNFS layout driver installed */
	if (!data->write_done_cb)
		data->write_done_cb = nfs4_commit_done_cb;
	data->res.server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
}
|
|
|
|
|
2010-05-08 01:34:17 +08:00
|
|
|
/*
 * Calldata for an asynchronous RENEW: which client to renew, and when
 * the call was issued (so the lease clock can be moved forward on a
 * successful reply). Allocated in nfs4_proc_async_renew(), freed in
 * nfs4_renew_release().
 */
struct nfs4_renewdata {
	struct nfs_client	*client;	/* holds a cl_count reference */
	unsigned long		timestamp;	/* jiffies when RENEW was sent */
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
|
|
|
|
* standalone procedure for queueing an asynchronous RENEW.
|
|
|
|
*/
|
2010-05-08 01:34:17 +08:00
|
|
|
/*
 * rpc_release for the async RENEW: reschedule the renewal timer if the
 * client is still in use, then drop our client reference and free the
 * calldata.
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	/* Only re-arm renewd if someone besides us still holds the client */
	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}
|
|
|
|
|
2010-05-08 01:34:17 +08:00
|
|
|
/*
 * rpc_call_done for the async RENEW: on failure kick lease recovery
 * (unless renewd is being shut down); on success advance the lease
 * timestamp recorded at call time.
 */
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	if (task->tk_status < 0) {
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
			nfs4_schedule_lease_recovery(clp);
		return;
	}
	do_renew_lease(clp, timestamp);
}
|
|
|
|
|
2006-01-03 16:55:04 +08:00
|
|
|
/* Callbacks for the standalone asynchronous RENEW task. */
static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};
|
|
|
|
|
2006-08-23 08:06:08 +08:00
|
|
|
int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
|
|
|
|
.rpc_argp = clp,
|
2006-01-03 16:55:25 +08:00
|
|
|
.rpc_cred = cred,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
2010-05-08 01:34:17 +08:00
|
|
|
struct nfs4_renewdata *data;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-02-05 19:45:06 +08:00
|
|
|
if (!atomic_inc_not_zero(&clp->cl_count))
|
|
|
|
return -EIO;
|
2010-05-08 01:34:17 +08:00
|
|
|
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
|
|
|
if (data == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
data->client = clp;
|
|
|
|
data->timestamp = jiffies;
|
2005-04-17 06:20:36 +08:00
|
|
|
return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
|
2010-05-08 01:34:17 +08:00
|
|
|
&nfs4_renew_ops, data);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-08-23 08:06:08 +08:00
|
|
|
/*
 * Synchronous RENEW for @clp. On success, advances the lease clock to
 * the time the call was issued.
 */
int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	/* Sample the clock before the call: the lease was valid then */
	unsigned long now = jiffies;
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}
|
|
|
|
|
2005-06-23 01:16:22 +08:00
|
|
|
/*
 * An NFSv4 server usefully supports ACLs only if it advertises the
 * ACL capability and handles both ALLOW and DENY ACE types.
 */
static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
	return (server->caps & NFS_CAP_ACLS)
		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}
|
|
|
|
|
|
|
|
/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
|
|
|
|
* it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
|
|
|
|
* the stack.
|
|
|
|
*/
|
|
|
|
#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
|
|
|
|
|
|
|
|
/*
 * Map an existing virtually-contiguous buffer onto an array of page
 * pointers (plus the offset of the buffer within its first page).
 * NOTE(review): only safe for directly-mapped memory (virt_to_page);
 * buffers that may come from the slab must use buf_to_pages_noslab.
 */
static void buf_to_pages(const void *buf, size_t buflen,
		struct page **pages, unsigned int *pgbase)
{
	const void *p = buf;

	*pgbase = offset_in_page(buf);
	/* Walk page-aligned addresses covering [buf, buf + buflen) */
	p -= *pgbase;
	while (p < buf + buflen) {
		*(pages++) = virt_to_page(p);
		p += PAGE_CACHE_SIZE;
	}
}
|
|
|
|
|
nfs4: Ensure that ACL pages sent over NFS were not allocated from the slab (v3)
The "bad_page()" page allocator sanity check was reported recently (call
chain as follows):
bad_page+0x69/0x91
free_hot_cold_page+0x81/0x144
skb_release_data+0x5f/0x98
__kfree_skb+0x11/0x1a
tcp_ack+0x6a3/0x1868
tcp_rcv_established+0x7a6/0x8b9
tcp_v4_do_rcv+0x2a/0x2fa
tcp_v4_rcv+0x9a2/0x9f6
do_timer+0x2df/0x52c
ip_local_deliver+0x19d/0x263
ip_rcv+0x539/0x57c
netif_receive_skb+0x470/0x49f
:virtio_net:virtnet_poll+0x46b/0x5c5
net_rx_action+0xac/0x1b3
__do_softirq+0x89/0x133
call_softirq+0x1c/0x28
do_softirq+0x2c/0x7d
do_IRQ+0xec/0xf5
default_idle+0x0/0x50
ret_from_intr+0x0/0xa
default_idle+0x29/0x50
cpu_idle+0x95/0xb8
start_kernel+0x220/0x225
_sinittext+0x22f/0x236
It occurs because an skb with a fraglist was freed from the tcp
retransmit queue when it was acked, but a page on that fraglist had
PG_Slab set (indicating it was allocated from the Slab allocator (which
means the free path above can't safely free it via put_page.
We tracked this back to an nfsv4 setacl operation, in which the nfs code
attempted to fill convert the passed in buffer to an array of pages in
__nfs4_proc_set_acl, which gets used by the skb->frags list in
xs_sendpages. __nfs4_proc_set_acl just converts each page in the buffer
to a page struct via virt_to_page, but the vfs allocates the buffer via
kmalloc, meaning the PG_slab bit is set. We can't create a buffer with
kmalloc and free it later in the tcp ack path with put_page, so we need
to either:
1) ensure that when we create the list of pages, no page struct has
PG_Slab set
or
2) not use a page list to send this data
Given that these buffers can be multiple pages and arbitrarily sized, I
think (1) is the right way to go. I've written the below patch to
allocate a page from the buddy allocator directly and copy the data over
to it. This ensures that we have a put_page free-able page for every
entry that winds up on an skb frag list, so it can be safely freed when
the frame is acked. We do a put page on each entry after the
rpc_call_sync call so as to drop our own reference count to the page,
leaving only the ref count taken by tcp_sendpages. This way the data
will be properly freed when the ack comes in
Successfully tested by myself to solve the above oops.
Note, as this is the result of a setacl operation that exceeded a page
of data, I think this amounts to a local DOS triggerable by an
uprivlidged user, so I'm CCing security on this as well.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Trond Myklebust <Trond.Myklebust@netapp.com>
CC: security@kernel.org
CC: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-03-05 08:26:03 +08:00
|
|
|
/*
 * Copy @buf into freshly allocated (buddy-allocator) pages so the data
 * can safely sit on an skb frag list and be released with put_page();
 * slab-backed memory must never go there (see the commit that added
 * this helper).
 *
 * Returns the number of pages filled, or -ENOMEM after freeing any
 * pages already allocated.
 */
static int buf_to_pages_noslab(const void *buf, size_t buflen,
		struct page **pages, unsigned int *pgbase)
{
	struct page *newpage, **spages;
	int rc = 0;
	size_t len;
	spages = pages;	/* remember the start for error unwinding */

	do {
		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
		newpage = alloc_page(GFP_KERNEL);

		if (newpage == NULL)
			goto unwind;
		memcpy(page_address(newpage), buf, len);
		buf += len;
		buflen -= len;
		*pages++ = newpage;
		rc++;
	} while (buflen != 0);

	return rc;

unwind:
	/* Free the pages allocated so far, newest first */
	for(; rc > 0; rc--)
		__free_page(spages[rc-1]);
	return -ENOMEM;
}
|
|
|
|
|
2005-06-23 01:16:23 +08:00
|
|
|
struct nfs4_cached_acl {
|
|
|
|
int cached;
|
|
|
|
size_t len;
|
2005-06-23 01:16:28 +08:00
|
|
|
char data[0];
|
2005-06-23 01:16:23 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Replace the inode's cached ACL under i_lock, freeing any previous
 * copy. Passing NULL simply drops the cache. Takes ownership of @acl.
 */
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	kfree(nfsi->nfs4_acl);
	nfsi->nfs4_acl = acl;
	spin_unlock(&inode->i_lock);
}
|
|
|
|
|
|
|
|
/* Invalidate the inode's cached NFSv4 ACL. */
static void nfs4_zap_acl_attr(struct inode *inode)
{
	nfs4_set_cached_acl(inode, NULL);
}
|
|
|
|
|
|
|
|
/*
 * Try to satisfy a getxattr-style ACL read from the in-memory cache.
 *
 * Returns the ACL length on success, -ENOENT when there is no usable
 * cached copy (caller must go to the server), or -ERANGE when the
 * caller's buffer is too small (per getxattr(2) semantics).
 * A NULL @buf is a length-only probe.
 */
static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_cached_acl *acl;
	int ret = -ENOENT;

	spin_lock(&inode->i_lock);
	acl = nfsi->nfs4_acl;
	if (acl == NULL)
		goto out;
	if (buf == NULL) /* user is just asking for length */
		goto out_len;
	if (acl->cached == 0)
		goto out;	/* length known, but data not cached */
	ret = -ERANGE; /* see getxattr(2) man page */
	if (acl->len > buflen)
		goto out;
	memcpy(buf, acl->data, acl->len);
out_len:
	ret = acl->len;
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Install a new cached ACL for @inode. Small ACLs (<= PAGE_SIZE, with
 * data supplied) are cached in full; otherwise only the length is
 * recorded so nfs4_read_cached_acl can still answer size probes.
 * Allocation failure silently leaves the cache empty (best effort).
 */
static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
{
	struct nfs4_cached_acl *acl;

	if (buf && acl_len <= PAGE_SIZE) {
		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 1;
		memcpy(acl->data, buf, acl_len);
	} else {
		/* Length-only entry: data too big (or not provided) */
		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 0;
	}
	acl->len = acl_len;
out:
	nfs4_set_cached_acl(inode, acl);
}
|
|
|
|
|
2006-08-25 00:27:15 +08:00
|
|
|
/*
 * Fetch the ACL from the server (one-shot; retry handling lives in
 * nfs4_get_acl_uncached). The reply is also written into the ACL
 * cache. Returns the ACL length, -ERANGE if the caller's buffer is
 * too small, or a negative errno.
 *
 * Fix: the original called page_address(localpage) before checking
 * whether alloc_page() had failed; the NULL check now comes first.
 */
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct page *pages[NFS4ACL_MAXPAGES];
	struct nfs_getaclargs args = {
		.fh = NFS_FH(inode),
		.acl_pages = pages,
		.acl_len = buflen,
	};
	struct nfs_getaclres res = {
		.acl_len = buflen,
	};
	void *resp_buf;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct page *localpage = NULL;
	int ret;

	if (buflen < PAGE_SIZE) {
		/* As long as we're doing a round trip to the server anyway,
		 * let's be prepared for a page of acl data. */
		localpage = alloc_page(GFP_KERNEL);
		if (localpage == NULL)
			return -ENOMEM;
		/* Only touch the page once the allocation is known good */
		resp_buf = page_address(localpage);
		args.acl_pages[0] = localpage;
		args.acl_pgbase = 0;
		args.acl_len = PAGE_SIZE;
	} else {
		resp_buf = buf;
		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
	}
	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
	if (ret)
		goto out_free;
	if (res.acl_len > args.acl_len)
		/* Reply was truncated: cache only the length */
		nfs4_write_cached_acl(inode, NULL, res.acl_len);
	else
		nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
	if (buf) {
		ret = -ERANGE;	/* see getxattr(2) man page */
		if (res.acl_len > buflen)
			goto out_free;
		if (localpage)
			memcpy(buf, resp_buf, res.acl_len);
	}
	ret = res.acl_len;
out_free:
	if (localpage)
		__free_page(localpage);
	return ret;
}
|
|
|
|
|
2006-08-25 00:27:15 +08:00
|
|
|
/*
 * Fetch the ACL from the server, retrying through the standard NFSv4
 * exception handler until it either succeeds or the handler gives up.
 */
static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t ret;

	for (;;) {
		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
		if (ret >= 0)
			break;
		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
		if (!exception.retry)
			break;
	}
	return ret;
}
|
|
|
|
|
2005-06-23 01:16:23 +08:00
|
|
|
/*
 * Read an inode's NFSv4 ACL: revalidate the inode, drop a stale ACL
 * cache if flagged, serve from cache when possible, and fall back to
 * an over-the-wire fetch otherwise.
 */
static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	ret = nfs_revalidate_inode(server, inode);
	if (ret < 0)
		return ret;
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
		nfs_zap_acl_cache(inode);
	ret = nfs4_read_cached_acl(inode, buf, buflen);
	if (ret != -ENOENT)
		return ret;
	/* -ENOENT means "no usable cached copy", not an error: go fetch */
	return nfs4_get_acl_uncached(inode, buf, buflen);
}
|
|
|
|
|
2006-08-25 00:27:15 +08:00
|
|
|
static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
|
2005-06-23 01:16:23 +08:00
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
|
|
|
struct page *pages[NFS4ACL_MAXPAGES];
|
|
|
|
struct nfs_setaclargs arg = {
|
|
|
|
.fh = NFS_FH(inode),
|
|
|
|
.acl_pages = pages,
|
|
|
|
.acl_len = buflen,
|
|
|
|
};
|
2009-04-01 21:22:01 +08:00
|
|
|
struct nfs_setaclres res;
|
2005-06-23 01:16:23 +08:00
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
|
|
|
|
.rpc_argp = &arg,
|
2009-04-01 21:22:01 +08:00
|
|
|
.rpc_resp = &res,
|
2005-06-23 01:16:23 +08:00
|
|
|
};
|
nfs4: Ensure that ACL pages sent over NFS were not allocated from the slab (v3)
The "bad_page()" page allocator sanity check was reported recently (call
chain as follows):
bad_page+0x69/0x91
free_hot_cold_page+0x81/0x144
skb_release_data+0x5f/0x98
__kfree_skb+0x11/0x1a
tcp_ack+0x6a3/0x1868
tcp_rcv_established+0x7a6/0x8b9
tcp_v4_do_rcv+0x2a/0x2fa
tcp_v4_rcv+0x9a2/0x9f6
do_timer+0x2df/0x52c
ip_local_deliver+0x19d/0x263
ip_rcv+0x539/0x57c
netif_receive_skb+0x470/0x49f
:virtio_net:virtnet_poll+0x46b/0x5c5
net_rx_action+0xac/0x1b3
__do_softirq+0x89/0x133
call_softirq+0x1c/0x28
do_softirq+0x2c/0x7d
do_IRQ+0xec/0xf5
default_idle+0x0/0x50
ret_from_intr+0x0/0xa
default_idle+0x29/0x50
cpu_idle+0x95/0xb8
start_kernel+0x220/0x225
_sinittext+0x22f/0x236
It occurs because an skb with a fraglist was freed from the tcp
retransmit queue when it was acked, but a page on that fraglist had
PG_Slab set (indicating it was allocated from the Slab allocator (which
means the free path above can't safely free it via put_page.
We tracked this back to an nfsv4 setacl operation, in which the nfs code
attempted to fill convert the passed in buffer to an array of pages in
__nfs4_proc_set_acl, which gets used by the skb->frags list in
xs_sendpages. __nfs4_proc_set_acl just converts each page in the buffer
to a page struct via virt_to_page, but the vfs allocates the buffer via
kmalloc, meaning the PG_slab bit is set. We can't create a buffer with
kmalloc and free it later in the tcp ack path with put_page, so we need
to either:
1) ensure that when we create the list of pages, no page struct has
PG_Slab set
or
2) not use a page list to send this data
Given that these buffers can be multiple pages and arbitrarily sized, I
think (1) is the right way to go. I've written the below patch to
allocate a page from the buddy allocator directly and copy the data over
to it. This ensures that we have a put_page free-able page for every
entry that winds up on an skb frag list, so it can be safely freed when
the frame is acked. We do a put page on each entry after the
rpc_call_sync call so as to drop our own reference count to the page,
leaving only the ref count taken by tcp_sendpages. This way the data
will be properly freed when the ack comes in
Successfully tested by myself to solve the above oops.
Note, as this is the result of a setacl operation that exceeded a page
of data, I think this amounts to a local DOS triggerable by an
uprivlidged user, so I'm CCing security on this as well.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Trond Myklebust <Trond.Myklebust@netapp.com>
CC: security@kernel.org
CC: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-03-05 08:26:03 +08:00
|
|
|
int ret, i;
|
2005-06-23 01:16:23 +08:00
|
|
|
|
|
|
|
if (!nfs4_server_supports_acls(server))
|
|
|
|
return -EOPNOTSUPP;
|
nfs4: Ensure that ACL pages sent over NFS were not allocated from the slab (v3)
The "bad_page()" page allocator sanity check was reported recently (call
chain as follows):
bad_page+0x69/0x91
free_hot_cold_page+0x81/0x144
skb_release_data+0x5f/0x98
__kfree_skb+0x11/0x1a
tcp_ack+0x6a3/0x1868
tcp_rcv_established+0x7a6/0x8b9
tcp_v4_do_rcv+0x2a/0x2fa
tcp_v4_rcv+0x9a2/0x9f6
do_timer+0x2df/0x52c
ip_local_deliver+0x19d/0x263
ip_rcv+0x539/0x57c
netif_receive_skb+0x470/0x49f
:virtio_net:virtnet_poll+0x46b/0x5c5
net_rx_action+0xac/0x1b3
__do_softirq+0x89/0x133
call_softirq+0x1c/0x28
do_softirq+0x2c/0x7d
do_IRQ+0xec/0xf5
default_idle+0x0/0x50
ret_from_intr+0x0/0xa
default_idle+0x29/0x50
cpu_idle+0x95/0xb8
start_kernel+0x220/0x225
_sinittext+0x22f/0x236
It occurs because an skb with a fraglist was freed from the tcp
retransmit queue when it was acked, but a page on that fraglist had
PG_Slab set (indicating it was allocated from the Slab allocator), which
means the free path above can't safely free it via put_page.
We tracked this back to an nfsv4 setacl operation, in which the nfs code
attempted to convert the passed-in buffer to an array of pages in
__nfs4_proc_set_acl, which gets used by the skb->frags list in
xs_sendpages. __nfs4_proc_set_acl just converts each page in the buffer
to a page struct via virt_to_page, but the vfs allocates the buffer via
kmalloc, meaning the PG_slab bit is set. We can't create a buffer with
kmalloc and free it later in the tcp ack path with put_page, so we need
to either:
1) ensure that when we create the list of pages, no page struct has
PG_Slab set
or
2) not use a page list to send this data
Given that these buffers can be multiple pages and arbitrarily sized, I
think (1) is the right way to go. I've written the below patch to
allocate a page from the buddy allocator directly and copy the data over
to it. This ensures that we have a put_page free-able page for every
entry that winds up on an skb frag list, so it can be safely freed when
the frame is acked. We do a put page on each entry after the
rpc_call_sync call so as to drop our own reference count to the page,
leaving only the ref count taken by tcp_sendpages. This way the data
will be properly freed when the ack comes in
Successfully tested by myself to solve the above oops.
Note, as this is the result of a setacl operation that exceeded a page
of data, I think this amounts to a local DoS triggerable by an
unprivileged user, so I'm CCing security on this as well.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Trond Myklebust <Trond.Myklebust@netapp.com>
CC: security@kernel.org
CC: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-03-05 08:26:03 +08:00
|
|
|
i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
|
|
|
|
if (i < 0)
|
|
|
|
return i;
|
2005-10-19 05:20:19 +08:00
|
|
|
nfs_inode_return_delegation(inode);
|
2011-03-25 01:12:24 +08:00
|
|
|
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
|
nfs4: Ensure that ACL pages sent over NFS were not allocated from the slab (v3)
The "bad_page()" page allocator sanity check was reported recently (call
chain as follows):
bad_page+0x69/0x91
free_hot_cold_page+0x81/0x144
skb_release_data+0x5f/0x98
__kfree_skb+0x11/0x1a
tcp_ack+0x6a3/0x1868
tcp_rcv_established+0x7a6/0x8b9
tcp_v4_do_rcv+0x2a/0x2fa
tcp_v4_rcv+0x9a2/0x9f6
do_timer+0x2df/0x52c
ip_local_deliver+0x19d/0x263
ip_rcv+0x539/0x57c
netif_receive_skb+0x470/0x49f
:virtio_net:virtnet_poll+0x46b/0x5c5
net_rx_action+0xac/0x1b3
__do_softirq+0x89/0x133
call_softirq+0x1c/0x28
do_softirq+0x2c/0x7d
do_IRQ+0xec/0xf5
default_idle+0x0/0x50
ret_from_intr+0x0/0xa
default_idle+0x29/0x50
cpu_idle+0x95/0xb8
start_kernel+0x220/0x225
_sinittext+0x22f/0x236
It occurs because an skb with a fraglist was freed from the tcp
retransmit queue when it was acked, but a page on that fraglist had
PG_Slab set (indicating it was allocated from the Slab allocator), which
means the free path above can't safely free it via put_page.
We tracked this back to an nfsv4 setacl operation, in which the nfs code
attempted to convert the passed-in buffer to an array of pages in
__nfs4_proc_set_acl, which gets used by the skb->frags list in
xs_sendpages. __nfs4_proc_set_acl just converts each page in the buffer
to a page struct via virt_to_page, but the vfs allocates the buffer via
kmalloc, meaning the PG_slab bit is set. We can't create a buffer with
kmalloc and free it later in the tcp ack path with put_page, so we need
to either:
1) ensure that when we create the list of pages, no page struct has
PG_Slab set
or
2) not use a page list to send this data
Given that these buffers can be multiple pages and arbitrarily sized, I
think (1) is the right way to go. I've written the below patch to
allocate a page from the buddy allocator directly and copy the data over
to it. This ensures that we have a put_page free-able page for every
entry that winds up on an skb frag list, so it can be safely freed when
the frame is acked. We do a put page on each entry after the
rpc_call_sync call so as to drop our own reference count to the page,
leaving only the ref count taken by tcp_sendpages. This way the data
will be properly freed when the ack comes in
Successfully tested by myself to solve the above oops.
Note, as this is the result of a setacl operation that exceeded a page
of data, I think this amounts to a local DoS triggerable by an
unprivileged user, so I'm CCing security on this as well.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Trond Myklebust <Trond.Myklebust@netapp.com>
CC: security@kernel.org
CC: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-03-05 08:26:03 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Free each page after tx, so the only ref left is
|
|
|
|
* held by the network stack
|
|
|
|
*/
|
|
|
|
for (; i > 0; i--)
|
|
|
|
put_page(pages[i-1]);
|
|
|
|
|
2010-12-01 18:42:16 +08:00
|
|
|
/*
|
|
|
|
* Acl update can result in inode attribute update.
|
|
|
|
* so mark the attribute cache invalid.
|
|
|
|
*/
|
|
|
|
spin_lock(&inode->i_lock);
|
|
|
|
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
|
|
|
|
spin_unlock(&inode->i_lock);
|
2008-06-12 05:39:04 +08:00
|
|
|
nfs_access_zap_cache(inode);
|
|
|
|
nfs_zap_acl_cache(inode);
|
2005-06-23 01:16:23 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2006-08-25 00:27:15 +08:00
|
|
|
/*
 * nfs4_proc_set_acl - set the NFSv4 ACL on @inode
 *
 * Thin wrapper around __nfs4_proc_set_acl(): retries the call through
 * the standard NFSv4 exception handler, which maps NFS4ERR_* status
 * codes and sets exception.retry when the operation should be retried.
 */
static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(inode),
|
|
|
|
__nfs4_proc_set_acl(inode, buf, buflen),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
static int
|
2010-06-16 21:52:25 +08:00
|
|
|
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-06-16 21:52:25 +08:00
|
|
|
struct nfs_client *clp = server->nfs_client;
|
|
|
|
|
|
|
|
if (task->tk_status >= 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
switch(task->tk_status) {
|
2008-12-24 04:21:46 +08:00
|
|
|
case -NFS4ERR_ADMIN_REVOKED:
|
|
|
|
case -NFS4ERR_BAD_STATEID:
|
|
|
|
case -NFS4ERR_OPENMODE:
|
|
|
|
if (state == NULL)
|
|
|
|
break;
|
2011-03-10 05:00:53 +08:00
|
|
|
nfs4_schedule_stateid_recovery(server, state);
|
|
|
|
goto wait_on_recovery;
|
2005-04-17 06:20:36 +08:00
|
|
|
case -NFS4ERR_STALE_STATEID:
|
2010-01-27 04:42:47 +08:00
|
|
|
case -NFS4ERR_STALE_CLIENTID:
|
2005-04-17 06:20:36 +08:00
|
|
|
case -NFS4ERR_EXPIRED:
|
2011-03-10 05:00:53 +08:00
|
|
|
nfs4_schedule_lease_recovery(clp);
|
|
|
|
goto wait_on_recovery;
|
nfs41: kick start nfs41 session recovery when handling errors
Remove checking for any errors that the SEQUENCE operation does not return.
-NFS4ERR_STALE_CLIENTID, NFS4ERR_EXPIRED, NFS4ERR_CB_PATH_DOWN, NFS4ERR_BACK_CHAN_BUSY, NFS4ERR_OP_NOT_IN_SESSION.
SEQUENCE operation error recovery is very primitive; we only reset the session.
Remove checking for any errors that are returned by the SEQUENCE operation, but
that resetting the session won't address.
NFS4ERR_RETRY_UNCACHED_REP, NFS4ERR_SEQUENCE_POS,NFS4ERR_TOO_MANY_OPS.
Add error checking for missing SEQUENCE errors that a session reset will
address.
NFS4ERR_BAD_HIGH_SLOT, NFS4ERR_DEADSESSION, NFS4ERR_SEQ_FALSE_RETRY.
A reset of the session is currently our only response to a SEQUENCE operation
error. Don't reset the session on errors where a new session won't help.
[nfs41: nfs4_async_handle_error update error checking]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[nfs41: trigger the state manager for session reset]
Replace session state bit with nfs_client state bit. Set the
NFS4CLNT_SESSION_SETUP bit upon a session related error in the sync/async
error handlers.
[nfs41: _nfs4_async_handle_error fix session reset error list]
Sequence operation errors that session reset could help.
NFS4ERR_BADSESSION
NFS4ERR_BADSLOT
NFS4ERR_BAD_HIGH_SLOT
NFS4ERR_DEADSESSION
NFS4ERR_CONN_NOT_BOUND_TO_SESSION
NFS4ERR_SEQ_FALSE_RETRY
NFS4ERR_SEQ_MISORDERED
Sequence operation errors that a session reset would not help
NFS4ERR_BADXDR
NFS4ERR_DELAY
NFS4ERR_REP_TOO_BIG
NFS4ERR_REP_TOO_BIG_TO_CACHE
NFS4ERR_REQ_TOO_BIG
NFS4ERR_RETRY_UNCACHED_REP
NFS4ERR_SEQUENCE_POS
NFS4ERR_TOO_MANY_OPS
Signed-off-by: Andy Adamson <andros@netapp.com>
[nfs41 nfs4_handle_exception fix session reset error list]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[moved nfs41_sequence_call_done code to nfs41: sequence operation]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2009-04-01 21:22:42 +08:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
case -NFS4ERR_BADSESSION:
|
|
|
|
case -NFS4ERR_BADSLOT:
|
|
|
|
case -NFS4ERR_BAD_HIGH_SLOT:
|
|
|
|
case -NFS4ERR_DEADSESSION:
|
|
|
|
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
|
|
|
|
case -NFS4ERR_SEQ_FALSE_RETRY:
|
|
|
|
case -NFS4ERR_SEQ_MISORDERED:
|
|
|
|
dprintk("%s ERROR %d, Reset session\n", __func__,
|
|
|
|
task->tk_status);
|
2011-03-10 05:00:53 +08:00
|
|
|
nfs4_schedule_session_recovery(clp->cl_session);
|
nfs41: kick start nfs41 session recovery when handling errors
Remove checking for any errors that the SEQUENCE operation does not return.
-NFS4ERR_STALE_CLIENTID, NFS4ERR_EXPIRED, NFS4ERR_CB_PATH_DOWN, NFS4ERR_BACK_CHAN_BUSY, NFS4ERR_OP_NOT_IN_SESSION.
SEQUENCE operation error recovery is very primitive; we only reset the session.
Remove checking for any errors that are returned by the SEQUENCE operation, but
that resetting the session won't address.
NFS4ERR_RETRY_UNCACHED_REP, NFS4ERR_SEQUENCE_POS,NFS4ERR_TOO_MANY_OPS.
Add error checking for missing SEQUENCE errors that a session reset will
address.
NFS4ERR_BAD_HIGH_SLOT, NFS4ERR_DEADSESSION, NFS4ERR_SEQ_FALSE_RETRY.
A reset of the session is currently our only response to a SEQUENCE operation
error. Don't reset the session on errors where a new session won't help.
[nfs41: nfs4_async_handle_error update error checking]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[nfs41: trigger the state manager for session reset]
Replace session state bit with nfs_client state bit. Set the
NFS4CLNT_SESSION_SETUP bit upon a session related error in the sync/async
error handlers.
[nfs41: _nfs4_async_handle_error fix session reset error list]
Sequence operation errors that session reset could help.
NFS4ERR_BADSESSION
NFS4ERR_BADSLOT
NFS4ERR_BAD_HIGH_SLOT
NFS4ERR_DEADSESSION
NFS4ERR_CONN_NOT_BOUND_TO_SESSION
NFS4ERR_SEQ_FALSE_RETRY
NFS4ERR_SEQ_MISORDERED
Sequence operation errors that a session reset would not help
NFS4ERR_BADXDR
NFS4ERR_DELAY
NFS4ERR_REP_TOO_BIG
NFS4ERR_REP_TOO_BIG_TO_CACHE
NFS4ERR_REQ_TOO_BIG
NFS4ERR_RETRY_UNCACHED_REP
NFS4ERR_SEQUENCE_POS
NFS4ERR_TOO_MANY_OPS
Signed-off-by: Andy Adamson <andros@netapp.com>
[nfs41 nfs4_handle_exception fix session reset error list]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
[moved nfs41_sequence_call_done code to nfs41: sequence operation]
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Benny Halevy <bhalevy@panasas.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2009-04-01 21:22:42 +08:00
|
|
|
task->tk_status = 0;
|
|
|
|
return -EAGAIN;
|
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
2005-04-17 06:20:36 +08:00
|
|
|
case -NFS4ERR_DELAY:
|
2010-06-16 21:52:25 +08:00
|
|
|
nfs_inc_server_stats(server, NFSIOS_DELAY);
|
2006-03-21 02:44:14 +08:00
|
|
|
case -NFS4ERR_GRACE:
|
2010-01-07 22:42:03 +08:00
|
|
|
case -EKEYEXPIRED:
|
2005-04-17 06:20:36 +08:00
|
|
|
rpc_delay(task, NFS4_POLL_RETRY_MAX);
|
|
|
|
task->tk_status = 0;
|
|
|
|
return -EAGAIN;
|
|
|
|
case -NFS4ERR_OLD_STATEID:
|
|
|
|
task->tk_status = 0;
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
task->tk_status = nfs4_map_errors(task->tk_status);
|
|
|
|
return 0;
|
2011-03-10 05:00:53 +08:00
|
|
|
wait_on_recovery:
|
2010-01-27 04:42:47 +08:00
|
|
|
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
|
|
|
|
if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
|
|
|
|
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
|
|
|
|
task->tk_status = 0;
|
|
|
|
return -EAGAIN;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2010-04-17 04:43:06 +08:00
|
|
|
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
|
|
|
|
unsigned short port, struct rpc_cred *cred,
|
|
|
|
struct nfs4_setclientid_res *res)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
nfs4_verifier sc_verifier;
|
|
|
|
struct nfs4_setclientid setclientid = {
|
|
|
|
.sc_verifier = &sc_verifier,
|
|
|
|
.sc_prog = program,
|
2011-01-06 10:04:30 +08:00
|
|
|
.sc_cb_ident = clp->cl_cb_ident,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
|
|
|
|
.rpc_argp = &setclientid,
|
2010-04-17 04:43:06 +08:00
|
|
|
.rpc_resp = res,
|
2006-01-03 16:55:26 +08:00
|
|
|
.rpc_cred = cred,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
2006-10-20 14:28:51 +08:00
|
|
|
__be32 *p;
|
2005-04-17 06:20:36 +08:00
|
|
|
int loop = 0;
|
|
|
|
int status;
|
|
|
|
|
2006-10-20 14:28:51 +08:00
|
|
|
p = (__be32*)sc_verifier.data;
|
2005-04-17 06:20:36 +08:00
|
|
|
*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
|
|
|
|
*p = htonl((u32)clp->cl_boot_time.tv_nsec);
|
|
|
|
|
|
|
|
for(;;) {
|
|
|
|
setclientid.sc_name_len = scnprintf(setclientid.sc_name,
|
2007-12-15 03:56:07 +08:00
|
|
|
sizeof(setclientid.sc_name), "%s/%s %s %s %u",
|
2007-12-11 03:57:09 +08:00
|
|
|
clp->cl_ipaddr,
|
|
|
|
rpc_peeraddr2str(clp->cl_rpcclient,
|
|
|
|
RPC_DISPLAY_ADDR),
|
2007-12-15 03:56:07 +08:00
|
|
|
rpc_peeraddr2str(clp->cl_rpcclient,
|
|
|
|
RPC_DISPLAY_PROTO),
|
2008-04-08 08:49:28 +08:00
|
|
|
clp->cl_rpcclient->cl_auth->au_ops->au_name,
|
2005-04-17 06:20:36 +08:00
|
|
|
clp->cl_id_uniquifier);
|
|
|
|
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
|
2007-12-11 03:57:09 +08:00
|
|
|
sizeof(setclientid.sc_netid),
|
|
|
|
rpc_peeraddr2str(clp->cl_rpcclient,
|
|
|
|
RPC_DISPLAY_NETID));
|
2005-04-17 06:20:36 +08:00
|
|
|
setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
|
2007-12-11 03:57:09 +08:00
|
|
|
sizeof(setclientid.sc_uaddr), "%s.%u.%u",
|
2005-04-17 06:20:36 +08:00
|
|
|
clp->cl_ipaddr, port >> 8, port & 255);
|
|
|
|
|
|
|
|
status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
|
|
|
|
if (status != -NFS4ERR_CLID_INUSE)
|
|
|
|
break;
|
|
|
|
if (signalled())
|
|
|
|
break;
|
|
|
|
if (loop++ & 1)
|
2010-12-14 08:05:46 +08:00
|
|
|
ssleep(clp->cl_lease_time / HZ + 1);
|
2005-04-17 06:20:36 +08:00
|
|
|
else
|
|
|
|
if (++clp->cl_id_uniquifier == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2010-04-17 04:43:06 +08:00
|
|
|
static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp,
|
|
|
|
struct nfs4_setclientid_res *arg,
|
|
|
|
struct rpc_cred *cred)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct nfs_fsinfo fsinfo;
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
|
2010-04-17 04:43:06 +08:00
|
|
|
.rpc_argp = arg,
|
2005-04-17 06:20:36 +08:00
|
|
|
.rpc_resp = &fsinfo,
|
2006-01-03 16:55:26 +08:00
|
|
|
.rpc_cred = cred,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
unsigned long now;
|
|
|
|
int status;
|
|
|
|
|
|
|
|
now = jiffies;
|
|
|
|
status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
|
|
|
|
if (status == 0) {
|
|
|
|
spin_lock(&clp->cl_lock);
|
|
|
|
clp->cl_lease_time = fsinfo.lease_time * HZ;
|
|
|
|
clp->cl_last_renewal = now;
|
|
|
|
spin_unlock(&clp->cl_lock);
|
|
|
|
}
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2010-04-17 04:43:06 +08:00
|
|
|
/*
 * nfs4_proc_setclientid_confirm - confirm a clientid with the server
 *
 * Retry loop around _nfs4_proc_setclientid_confirm().  On
 * NFS4ERR_RESOURCE or NFS4ERR_DELAY the call sleeps via nfs4_delay()
 * (which returns 0 to keep looping) and then retries; any other
 * result, including success, ends the loop.
 */
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
|
|
|
|
struct nfs4_setclientid_res *arg,
|
|
|
|
struct rpc_cred *cred)
|
2006-03-21 02:44:47 +08:00
|
|
|
{
|
2008-06-25 01:25:57 +08:00
|
|
|
long timeout = 0;
|
2006-03-21 02:44:47 +08:00
|
|
|
int err;
|
|
|
|
do {
|
2010-04-17 04:43:06 +08:00
|
|
|
err = _nfs4_proc_setclientid_confirm(clp, arg, cred);
|
2006-03-21 02:44:47 +08:00
|
|
|
switch (err) {
|
|
|
|
case 0:
|
|
|
|
return err;
|
|
|
|
case -NFS4ERR_RESOURCE:
|
|
|
|
/* The IBM lawyers misread another document! */
|
|
|
|
case -NFS4ERR_DELAY:
|
|
|
|
err = nfs4_delay(clp->cl_rpcclient, &timeout);
|
|
|
|
}
|
|
|
|
} while (err == 0);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:18 +08:00
|
|
|
/* Per-call state for an asynchronous DELEGRETURN RPC. */
struct nfs4_delegreturndata {
|
|
|
|
struct nfs4_delegreturnargs args; /* RPC argument block */
|
2006-01-03 16:55:38 +08:00
|
|
|
struct nfs4_delegreturnres res; /* RPC result block */
|
2006-01-03 16:55:18 +08:00
|
|
|
struct nfs_fh fh; /* private copy of the file handle */
|
|
|
|
nfs4_stateid stateid; /* private copy of the delegation stateid */
|
2006-01-03 16:55:21 +08:00
|
|
|
unsigned long timestamp; /* jiffies at RPC start, for renew_lease() */
|
2006-01-03 16:55:38 +08:00
|
|
|
struct nfs_fattr fattr; /* attributes returned by the server */
|
2006-01-03 16:55:18 +08:00
|
|
|
int rpc_status; /* final tk_status, copied in the done callback */
|
|
|
|
};
|
|
|
|
|
|
|
|
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_delegreturndata *data = calldata;
|
2009-04-01 21:22:28 +08:00
|
|
|
|
2010-08-01 02:29:06 +08:00
|
|
|
if (!nfs4_sequence_done(task, &data->res.seq_res))
|
|
|
|
return;
|
2009-04-01 21:22:28 +08:00
|
|
|
|
2009-12-07 22:23:21 +08:00
|
|
|
switch (task->tk_status) {
|
|
|
|
case -NFS4ERR_STALE_STATEID:
|
|
|
|
case -NFS4ERR_EXPIRED:
|
|
|
|
case 0:
|
2006-01-03 16:55:38 +08:00
|
|
|
renew_lease(data->res.server, data->timestamp);
|
2009-12-07 22:23:21 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
if (nfs4_async_handle_error(task, data->res.server, NULL) ==
|
|
|
|
-EAGAIN) {
|
|
|
|
nfs_restart_rpc(task, data->res.server->nfs_client);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
data->rpc_status = task->tk_status;
|
2006-01-03 16:55:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* rpc_release callback for DELEGRETURN: frees the per-call data. */
static void nfs4_delegreturn_release(void *calldata)
|
|
|
|
{
|
|
|
|
kfree(calldata);
|
|
|
|
}
|
|
|
|
|
2009-04-01 21:22:28 +08:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
|
|
|
|
{
|
|
|
|
struct nfs4_delegreturndata *d_data;
|
|
|
|
|
|
|
|
d_data = (struct nfs4_delegreturndata *)data;
|
|
|
|
|
2010-06-16 21:52:26 +08:00
|
|
|
if (nfs4_setup_sequence(d_data->res.server,
|
2009-04-01 21:22:28 +08:00
|
|
|
&d_data->args.seq_args,
|
|
|
|
&d_data->res.seq_res, 1, task))
|
|
|
|
return;
|
|
|
|
rpc_call_start(task);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
2006-03-21 02:44:07 +08:00
|
|
|
/* RPC callback operations for the asynchronous DELEGRETURN task. */
static const struct rpc_call_ops nfs4_delegreturn_ops = {
|
2009-04-01 21:22:28 +08:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
/* NFSv4.1 only: set up the session SEQUENCE before the call */
.rpc_call_prepare = nfs4_delegreturn_prepare,
|
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
2006-01-03 16:55:18 +08:00
|
|
|
.rpc_call_done = nfs4_delegreturn_done,
|
|
|
|
.rpc_release = nfs4_delegreturn_release,
|
|
|
|
};
|
|
|
|
|
2008-01-25 07:14:34 +08:00
|
|
|
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
|
2006-01-03 16:55:18 +08:00
|
|
|
{
|
|
|
|
struct nfs4_delegreturndata *data;
|
2006-01-03 16:55:38 +08:00
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
2006-01-03 16:55:18 +08:00
|
|
|
struct rpc_task *task;
|
2007-07-15 03:40:01 +08:00
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
|
|
|
|
.rpc_cred = cred,
|
|
|
|
};
|
2007-07-15 03:39:59 +08:00
|
|
|
struct rpc_task_setup task_setup_data = {
|
|
|
|
.rpc_client = server->client,
|
2007-07-15 03:40:01 +08:00
|
|
|
.rpc_message = &msg,
|
2007-07-15 03:39:59 +08:00
|
|
|
.callback_ops = &nfs4_delegreturn_ops,
|
|
|
|
.flags = RPC_TASK_ASYNC,
|
|
|
|
};
|
2008-01-25 07:14:34 +08:00
|
|
|
int status = 0;
|
2006-01-03 16:55:18 +08:00
|
|
|
|
2010-05-14 00:51:01 +08:00
|
|
|
data = kzalloc(sizeof(*data), GFP_NOFS);
|
2006-01-03 16:55:18 +08:00
|
|
|
if (data == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
data->args.fhandle = &data->fh;
|
|
|
|
data->args.stateid = &data->stateid;
|
2006-01-03 16:55:38 +08:00
|
|
|
data->args.bitmask = server->attr_bitmask;
|
2006-01-03 16:55:18 +08:00
|
|
|
nfs_copy_fh(&data->fh, NFS_FH(inode));
|
|
|
|
memcpy(&data->stateid, stateid, sizeof(data->stateid));
|
2006-01-03 16:55:38 +08:00
|
|
|
data->res.fattr = &data->fattr;
|
|
|
|
data->res.server = server;
|
2007-07-15 03:40:01 +08:00
|
|
|
nfs_fattr_init(data->res.fattr);
|
2006-01-03 16:55:21 +08:00
|
|
|
data->timestamp = jiffies;
|
2006-01-03 16:55:18 +08:00
|
|
|
data->rpc_status = 0;
|
|
|
|
|
2007-07-15 03:39:59 +08:00
|
|
|
task_setup_data.callback_data = data;
|
2010-12-21 23:52:24 +08:00
|
|
|
msg.rpc_argp = &data->args;
|
|
|
|
msg.rpc_resp = &data->res;
|
2007-07-15 03:39:59 +08:00
|
|
|
task = rpc_run_task(&task_setup_data);
|
2006-03-21 07:11:10 +08:00
|
|
|
if (IS_ERR(task))
|
2006-01-03 16:55:18 +08:00
|
|
|
return PTR_ERR(task);
|
2008-01-25 07:14:34 +08:00
|
|
|
if (!issync)
|
|
|
|
goto out;
|
2006-01-03 16:55:18 +08:00
|
|
|
status = nfs4_wait_for_completion_rpc_task(task);
|
2008-01-25 07:14:34 +08:00
|
|
|
if (status != 0)
|
|
|
|
goto out;
|
|
|
|
status = data->rpc_status;
|
|
|
|
if (status != 0)
|
|
|
|
goto out;
|
|
|
|
nfs_refresh_inode(inode, &data->fattr);
|
|
|
|
out:
|
2006-11-12 11:18:03 +08:00
|
|
|
rpc_put_task(task);
|
2006-01-03 16:55:18 +08:00
|
|
|
return status;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2008-01-25 07:14:34 +08:00
|
|
|
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
do {
|
2008-01-25 07:14:34 +08:00
|
|
|
err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
|
2005-04-17 06:20:36 +08:00
|
|
|
switch (err) {
|
|
|
|
case -NFS4ERR_STALE_STATEID:
|
|
|
|
case -NFS4ERR_EXPIRED:
|
|
|
|
case 0:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
err = nfs4_handle_exception(server, err, &exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
|
|
|
|
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* sleep, with exponential backoff, and retry the LOCK operation.
|
|
|
|
*/
|
|
|
|
/*
 * Sleep for @timeout jiffies (killable), then return the next retry
 * interval: exponential backoff, capped at NFS4_LOCK_MAXTIMEOUT.
 */
static unsigned long
|
|
|
|
nfs4_set_lock_task_retry(unsigned long timeout)
|
|
|
|
{
|
2007-12-07 05:24:39 +08:00
|
|
|
schedule_timeout_killable(timeout);
|
2005-04-17 06:20:36 +08:00
|
|
|
timeout <<= 1;
|
|
|
|
if (timeout > NFS4_LOCK_MAXTIMEOUT)
|
|
|
|
return NFS4_LOCK_MAXTIMEOUT;
|
|
|
|
return timeout;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
|
|
|
|
{
|
|
|
|
struct inode *inode = state->inode;
|
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
2006-08-23 08:06:09 +08:00
|
|
|
struct nfs_client *clp = server->nfs_client;
|
2006-01-03 16:55:16 +08:00
|
|
|
struct nfs_lockt_args arg = {
|
2005-04-17 06:20:36 +08:00
|
|
|
.fh = NFS_FH(inode),
|
2006-01-03 16:55:16 +08:00
|
|
|
.fl = request,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
2006-01-03 16:55:16 +08:00
|
|
|
struct nfs_lockt_res res = {
|
|
|
|
.denied = request,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
|
|
|
|
.rpc_argp = &arg,
|
|
|
|
.rpc_resp = &res,
|
|
|
|
.rpc_cred = state->owner->so_cred,
|
|
|
|
};
|
|
|
|
struct nfs4_lock_state *lsp;
|
|
|
|
int status;
|
|
|
|
|
2006-01-03 16:55:16 +08:00
|
|
|
arg.lock_owner.clientid = clp->cl_clientid;
|
2005-06-23 01:16:32 +08:00
|
|
|
status = nfs4_set_lock_state(state, request);
|
|
|
|
if (status != 0)
|
|
|
|
goto out;
|
|
|
|
lsp = request->fl_u.nfs4_fl.owner;
|
2007-07-03 01:58:33 +08:00
|
|
|
arg.lock_owner.id = lsp->ls_id.id;
|
2010-12-21 23:45:27 +08:00
|
|
|
arg.lock_owner.s_dev = server->s_dev;
|
2011-03-25 01:12:24 +08:00
|
|
|
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
|
2006-01-03 16:55:16 +08:00
|
|
|
switch (status) {
|
|
|
|
case 0:
|
|
|
|
request->fl_type = F_UNLCK;
|
|
|
|
break;
|
|
|
|
case -NFS4ERR_DENIED:
|
|
|
|
status = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2007-02-23 07:48:53 +08:00
|
|
|
request->fl_ops->fl_release_private(request);
|
2005-06-23 01:16:32 +08:00
|
|
|
out:
|
2005-04-17 06:20:36 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * nfs4_proc_getlk - test for a conflicting lock (LOCKT)
 *
 * Wraps _nfs4_proc_getlk() in the standard NFSv4 exception-retry
 * loop: nfs4_handle_exception() maps server errors and sets
 * exception.retry when the request should be reissued.
 */
static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(NFS_SERVER(state->inode),
|
|
|
|
_nfs4_proc_getlk(state, cmd, request),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * do_vfs_lock - apply a granted lock to the local VFS lock tables
 *
 * Dispatches on the lock type: POSIX (fcntl-style) locks go through
 * posix_lock_file_wait(), BSD flock-style locks through
 * flock_lock_file_wait().  A lock that is neither is a programming
 * error, hence BUG().
 */
static int do_vfs_lock(struct file *file, struct file_lock *fl)
|
|
|
|
{
|
|
|
|
int res = 0;
|
|
|
|
switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
|
|
|
|
case FL_POSIX:
|
|
|
|
res = posix_lock_file_wait(file, fl);
|
|
|
|
break;
|
|
|
|
case FL_FLOCK:
|
|
|
|
res = flock_lock_file_wait(file, fl);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2005-10-19 05:20:15 +08:00
|
|
|
/* Per-call state for an asynchronous LOCKU (unlock) RPC. */
struct nfs4_unlockdata {
|
2006-01-03 16:55:16 +08:00
|
|
|
struct nfs_locku_args arg; /* RPC argument block */
|
|
|
|
struct nfs_locku_res res; /* RPC result block */
|
2005-10-19 05:20:15 +08:00
|
|
|
struct nfs4_lock_state *lsp; /* lock state; holds a ls_count ref */
|
|
|
|
struct nfs_open_context *ctx; /* pins the open file until locks are freed */
|
2006-01-03 16:55:16 +08:00
|
|
|
struct file_lock fl; /* private copy of the caller's file_lock */
|
|
|
|
const struct nfs_server *server;
|
2006-01-03 16:55:21 +08:00
|
|
|
unsigned long timestamp; /* jiffies at RPC start, for renew_lease() */
|
2005-10-19 05:20:15 +08:00
|
|
|
};
|
|
|
|
|
2006-01-03 16:55:16 +08:00
|
|
|
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
|
|
|
|
struct nfs_open_context *ctx,
|
|
|
|
struct nfs4_lock_state *lsp,
|
|
|
|
struct nfs_seqid *seqid)
|
|
|
|
{
|
|
|
|
struct nfs4_unlockdata *p;
|
|
|
|
struct inode *inode = lsp->ls_state->inode;
|
|
|
|
|
2010-05-14 00:51:01 +08:00
|
|
|
p = kzalloc(sizeof(*p), GFP_NOFS);
|
2006-01-03 16:55:16 +08:00
|
|
|
if (p == NULL)
|
|
|
|
return NULL;
|
|
|
|
p->arg.fh = NFS_FH(inode);
|
|
|
|
p->arg.fl = &p->fl;
|
|
|
|
p->arg.seqid = seqid;
|
2008-04-08 01:20:54 +08:00
|
|
|
p->res.seqid = seqid;
|
2006-01-03 16:55:16 +08:00
|
|
|
p->arg.stateid = &lsp->ls_stateid;
|
|
|
|
p->lsp = lsp;
|
|
|
|
atomic_inc(&lsp->ls_count);
|
|
|
|
/* Ensure we don't close file until we're done freeing locks! */
|
|
|
|
p->ctx = get_nfs_open_context(ctx);
|
|
|
|
memcpy(&p->fl, fl, sizeof(p->fl));
|
|
|
|
p->server = NFS_SERVER(inode);
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:07 +08:00
|
|
|
/*
 * rpc_release callback for LOCKU: drop every reference taken in
 * nfs4_alloc_unlockdata() (seqid, lock state, open context) and free
 * the per-call data.
 */
static void nfs4_locku_release_calldata(void *data)
|
2005-10-19 05:20:15 +08:00
|
|
|
{
|
2006-01-03 16:55:04 +08:00
|
|
|
struct nfs4_unlockdata *calldata = data;
|
2006-01-03 16:55:16 +08:00
|
|
|
nfs_free_seqid(calldata->arg.seqid);
|
2006-01-03 16:55:07 +08:00
|
|
|
nfs4_put_lock_state(calldata->lsp);
|
|
|
|
put_nfs_open_context(calldata->ctx);
|
|
|
|
kfree(calldata);
|
2005-10-19 05:20:15 +08:00
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:04 +08:00
|
|
|
static void nfs4_locku_done(struct rpc_task *task, void *data)
|
2005-10-19 05:20:15 +08:00
|
|
|
{
|
2006-01-03 16:55:04 +08:00
|
|
|
struct nfs4_unlockdata *calldata = data;
|
2005-10-19 05:20:15 +08:00
|
|
|
|
2010-08-01 02:29:06 +08:00
|
|
|
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
|
|
|
|
return;
|
2005-10-19 05:20:15 +08:00
|
|
|
switch (task->tk_status) {
|
|
|
|
case 0:
|
|
|
|
memcpy(calldata->lsp->ls_stateid.data,
|
2006-01-03 16:55:16 +08:00
|
|
|
calldata->res.stateid.data,
|
2005-10-19 05:20:15 +08:00
|
|
|
sizeof(calldata->lsp->ls_stateid.data));
|
2006-01-03 16:55:21 +08:00
|
|
|
renew_lease(calldata->server, calldata->timestamp);
|
2005-10-19 05:20:15 +08:00
|
|
|
break;
|
2008-12-24 04:21:46 +08:00
|
|
|
case -NFS4ERR_BAD_STATEID:
|
|
|
|
case -NFS4ERR_OLD_STATEID:
|
2005-10-19 05:20:15 +08:00
|
|
|
case -NFS4ERR_STALE_STATEID:
|
|
|
|
case -NFS4ERR_EXPIRED:
|
|
|
|
break;
|
|
|
|
default:
|
2008-12-24 04:21:46 +08:00
|
|
|
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
|
2009-12-07 22:00:24 +08:00
|
|
|
nfs_restart_rpc(task,
|
2009-12-06 08:32:19 +08:00
|
|
|
calldata->server->nfs_client);
|
2005-10-19 05:20:15 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:05 +08:00
|
|
|
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
|
2005-10-19 05:20:15 +08:00
|
|
|
{
|
2006-01-03 16:55:05 +08:00
|
|
|
struct nfs4_unlockdata *calldata = data;
|
2005-10-19 05:20:15 +08:00
|
|
|
|
2006-01-03 16:55:16 +08:00
|
|
|
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
|
2005-10-19 05:20:15 +08:00
|
|
|
return;
|
|
|
|
if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
|
2006-01-03 16:55:04 +08:00
|
|
|
/* Note: exit _without_ running nfs4_locku_done */
|
|
|
|
task->tk_action = NULL;
|
2005-10-19 05:20:15 +08:00
|
|
|
return;
|
|
|
|
}
|
2006-01-03 16:55:21 +08:00
|
|
|
calldata->timestamp = jiffies;
|
2010-06-16 21:52:26 +08:00
|
|
|
if (nfs4_setup_sequence(calldata->server,
|
2009-04-01 21:22:23 +08:00
|
|
|
&calldata->arg.seq_args,
|
|
|
|
&calldata->res.seq_res, 1, task))
|
|
|
|
return;
|
2007-07-15 03:40:01 +08:00
|
|
|
rpc_call_start(task);
|
2005-10-19 05:20:15 +08:00
|
|
|
}
|
|
|
|
|
2006-01-03 16:55:04 +08:00
|
|
|
/* RPC callback operations for the asynchronous LOCKU (unlock) task. */
static const struct rpc_call_ops nfs4_locku_ops = {
|
2006-01-03 16:55:05 +08:00
|
|
|
.rpc_call_prepare = nfs4_locku_prepare,
|
2006-01-03 16:55:04 +08:00
|
|
|
.rpc_call_done = nfs4_locku_done,
|
2006-01-03 16:55:07 +08:00
|
|
|
.rpc_release = nfs4_locku_release_calldata,
|
2006-01-03 16:55:04 +08:00
|
|
|
};
|
|
|
|
|
2006-01-03 16:55:17 +08:00
|
|
|
/*
 * Start an asynchronous LOCKU (unlock) RPC for the given lock state.
 *
 * On success returns the running rpc_task (caller may wait on it and must
 * drop its reference).  On failure returns an ERR_PTR.
 *
 * Ownership: @seqid is consumed either way - on allocation failure it is
 * freed here; otherwise it is owned by the unlockdata and released by the
 * task's rpc_release callback.
 */
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_locku_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};

	/* Ensure this is an unlock - when canceling a lock, the
	 * canceled lock is passed in, and it won't be an unlock.
	 */
	fl->fl_type = F_UNLCK;

	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
	if (data == NULL) {
		/* seqid was not handed off to data; free it here */
		nfs_free_seqid(seqid);
		return ERR_PTR(-ENOMEM);
	}

	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	return rpc_run_task(&task_setup_data);
}
|
|
|
|
|
2005-10-19 05:20:15 +08:00
|
|
|
/*
 * Release a POSIX lock: drop it locally in the VFS first, then send the
 * LOCKU to the server (unless the lock was only cached under a delegation,
 * in which case no RPC is needed).
 *
 * request->fl_flags is saved and restored because FL_EXISTS is set
 * temporarily to detect "no such lock" from the VFS.
 */
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_seqid *seqid;
	struct nfs4_lock_state *lsp;
	struct rpc_task *task;
	int status = 0;
	unsigned char fl_flags = request->fl_flags;

	status = nfs4_set_lock_state(state, request);
	/* Unlock _before_ we do the RPC call */
	request->fl_flags |= FL_EXISTS;
	/* nfsi->rwsem guards against concurrent delegation recall */
	down_read(&nfsi->rwsem);
	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
		/* Lock did not exist locally: nothing to tell the server */
		up_read(&nfsi->rwsem);
		goto out;
	}
	up_read(&nfsi->rwsem);
	if (status != 0)
		goto out;
	/* Is this a delegated lock? */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
	status = -ENOMEM;
	if (seqid == NULL)
		goto out;
	/* nfs4_do_unlck consumes seqid on both success and failure */
	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
	status = PTR_ERR(task);
	if (IS_ERR(task))
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	rpc_put_task(task);
out:
	request->fl_flags = fl_flags;
	return status;
}
|
|
|
|
|
2006-01-03 16:55:17 +08:00
|
|
|
/* Calldata for an asynchronous LOCK RPC. */
struct nfs4_lockdata {
	struct nfs_lock_args arg;	/* LOCK request arguments */
	struct nfs_lock_res res;	/* LOCK reply */
	struct nfs4_lock_state *lsp;	/* lock state (refcounted) */
	struct nfs_open_context *ctx;	/* open context (refcounted) */
	struct file_lock fl;		/* private copy of the caller's lock */
	unsigned long timestamp;	/* jiffies at send time, for lease renewal */
	int rpc_status;			/* final RPC status for the waiter */
	int cancelled;			/* set if the waiter gave up; triggers unlock */
	struct nfs_server *server;
};
|
|
|
|
|
|
|
|
/*
 * Allocate and initialize calldata for a LOCK RPC.
 *
 * Takes a reference on @lsp and on @ctx (released by nfs4_lock_release),
 * and keeps a private copy of @fl so the RPC outlives the caller's lock.
 * Returns NULL on allocation failure (partial allocations are undone).
 */
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
		gfp_t gfp_mask)
{
	struct nfs4_lockdata *p;
	struct inode *inode = lsp->ls_state->inode;
	struct nfs_server *server = NFS_SERVER(inode);

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		return NULL;

	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	/* open seqid is needed in case this becomes an open_to_lock_owner */
	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
	if (p->arg.open_seqid == NULL)
		goto out_free;
	p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
	if (p->arg.lock_seqid == NULL)
		goto out_free_seqid;
	p->arg.lock_stateid = &lsp->ls_stateid;
	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
	p->arg.lock_owner.id = lsp->ls_id.id;
	p->arg.lock_owner.s_dev = server->s_dev;
	p->res.lock_seqid = p->arg.lock_seqid;
	p->lsp = lsp;
	p->server = server;
	atomic_inc(&lsp->ls_count);
	p->ctx = get_nfs_open_context(ctx);
	memcpy(&p->fl, fl, sizeof(p->fl));
	return p;
out_free_seqid:
	nfs_free_seqid(p->arg.open_seqid);
out_free:
	kfree(p);
	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * rpc_call_prepare for the LOCK RPC: serialize on the lock (and, for a new
 * lock owner, the open) seqid, then set up the session sequence and start
 * the call.  May put the task to sleep on a seqid waitqueue and return.
 */
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;
	struct nfs4_state *state = data->lsp->ls_state;

	dprintk("%s: begin!\n", __func__);
	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
		return;
	/* Do we need to do an open_to_lock_owner? */
	if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
		/* First lock by this owner: also serialize on the open seqid */
		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
			return;
		data->arg.open_stateid = &state->stateid;
		data->arg.new_lock_owner = 1;
		data->res.open_seqid = data->arg.open_seqid;
	} else
		data->arg.new_lock_owner = 0;
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->server,
				&data->arg.seq_args,
				&data->res.seq_res, 1, task))
		return;
	rpc_call_start(task);
	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
|
|
|
|
|
2009-12-15 13:27:57 +08:00
|
|
|
/*
 * Prepare callback for a LOCK sent during state recovery: run with
 * privileged priority so it is not blocked by the recovery it serves.
 */
static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_lock_prepare(task, calldata);
}
|
|
|
|
|
2006-01-03 16:55:17 +08:00
|
|
|
/*
 * rpc_call_done for the LOCK RPC: record the status for the waiter, and on
 * success confirm the lock-owner seqid, copy the returned lock stateid and
 * renew the lease using the request's send timestamp.
 */
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	dprintk("%s: begin!\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	data->rpc_status = task->tk_status;
	if (data->arg.new_lock_owner != 0) {
		if (data->rpc_status == 0)
			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
		else
			goto out;
	}
	if (data->rpc_status == 0) {
		memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
					sizeof(data->lsp->ls_stateid.data));
		data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
		renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp);
	}
out:
	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
|
|
|
|
|
|
|
|
/*
 * rpc_release for the LOCK RPC: free the calldata and drop the references
 * taken in nfs4_alloc_lockdata.
 *
 * If the waiter was interrupted (data->cancelled), the lock may have been
 * granted on the server even though nobody is left to use it, so an async
 * unlock is fired off; nfs4_do_unlck then owns lock_seqid.  Otherwise
 * lock_seqid is freed here.
 */
static void nfs4_lock_release(void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	dprintk("%s: begin!\n", __func__);
	nfs_free_seqid(data->arg.open_seqid);
	if (data->cancelled != 0) {
		struct rpc_task *task;
		/* nfs4_do_unlck consumes data->arg.lock_seqid */
		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
				data->arg.lock_seqid);
		if (!IS_ERR(task))
			rpc_put_task_async(task);
		dprintk("%s: cancelling lock!\n", __func__);
	} else
		nfs_free_seqid(data->arg.lock_seqid);
	nfs4_put_lock_state(data->lsp);
	put_nfs_open_context(data->ctx);
	kfree(data);
	dprintk("%s: done!\n", __func__);
}
|
|
|
|
|
|
|
|
/* Async callbacks for a normal (non-recovery) LOCK RPC task. */
static const struct rpc_call_ops nfs4_lock_ops = {
	.rpc_call_prepare = nfs4_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};
|
|
|
|
|
2009-12-15 13:27:57 +08:00
|
|
|
/* Async callbacks for a LOCK RPC sent during state recovery
 * (privileged prepare; done/release shared with the normal path). */
static const struct rpc_call_ops nfs4_recover_lock_ops = {
	.rpc_call_prepare = nfs4_recover_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};
|
|
|
|
|
2010-01-27 04:42:21 +08:00
|
|
|
static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
|
|
|
|
{
|
|
|
|
switch (error) {
|
|
|
|
case -NFS4ERR_ADMIN_REVOKED:
|
|
|
|
case -NFS4ERR_BAD_STATEID:
|
2011-03-10 05:00:56 +08:00
|
|
|
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
|
2010-01-27 04:42:21 +08:00
|
|
|
if (new_lock_owner != 0 ||
|
|
|
|
(lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
|
2011-03-10 05:00:56 +08:00
|
|
|
nfs4_schedule_stateid_recovery(server, lsp->ls_state);
|
2010-01-27 04:42:47 +08:00
|
|
|
break;
|
|
|
|
case -NFS4ERR_STALE_STATEID:
|
|
|
|
lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
|
2011-03-10 05:00:56 +08:00
|
|
|
case -NFS4ERR_EXPIRED:
|
|
|
|
nfs4_schedule_lease_recovery(server->nfs_client);
|
2010-01-27 04:42:21 +08:00
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2009-12-09 17:50:14 +08:00
|
|
|
/*
 * Send a LOCK request and wait for it to complete.
 *
 * @recovery_type selects normal operation (NFS_LOCK_NEW) or one of the
 * recovery modes (NFS_LOCK_RECLAIM / NFS_LOCK_EXPIRED), which use the
 * privileged callback ops and GFP_NOFS allocation.
 *
 * If the wait is interrupted, the task is marked cancelled so that
 * nfs4_lock_release will undo any lock the server may still grant.
 * Returns 0 or a negative errno/NFS4ERR code.
 */
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
	struct nfs4_lockdata *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_lock_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int ret;

	dprintk("%s: begin!\n", __func__);
	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
			fl->fl_u.nfs4_fl.owner,
			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	if (IS_SETLKW(cmd))
		data->arg.block = 1;
	if (recovery_type > NFS_LOCK_NEW) {
		if (recovery_type == NFS_LOCK_RECLAIM)
			data->arg.reclaim = NFS_LOCK_RECLAIM;
		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
	}
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	ret = nfs4_wait_for_completion_rpc_task(task);
	if (ret == 0) {
		ret = data->rpc_status;
		if (ret)
			nfs4_handle_setlk_error(data->server, data->lsp,
					data->arg.new_lock_owner, ret);
	} else
		/* interrupted: let rpc_release cancel the lock on the server */
		data->cancelled = 1;
	rpc_put_task(task);
	dprintk("%s: done, ret = %d!\n", __func__, ret);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Reclaim a lock after a server reboot (grace period), retrying on
 * NFS4ERR_DELAY.  If a delegation covers the state there is nothing to
 * reclaim on the wire.
 */
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		/* Cache the lock if possible... */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Re-establish a lock after lease expiry, retrying while the server
 * returns GRACE or DELAY.  Any other result ends the loop.
 */
static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	err = nfs4_set_lock_state(state, request);
	if (err != 0)
		return err;
	do {
		/* delegation still covers the lock: nothing to redo */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Set a lock: probe the local VFS first (FL_ACCESS), then either cache
 * the lock locally under a delegation or send a LOCK RPC and, on success,
 * record the lock in the VFS.  nfsi->rwsem is held across RPC + VFS update
 * to avoid racing with delegation recall.
 *
 * request->fl_flags is saved/restored around the temporary flag changes.
 */
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	unsigned char fl_flags = request->fl_flags;
	int status = -ENOLCK;

	/* POSIX locks must be explicitly enabled for this state */
	if ((fl_flags & FL_POSIX) &&
			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
		goto out;
	/* Is this a delegated open? */
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	/* FL_ACCESS: test for conflicts locally without taking the lock */
	request->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(request->fl_file, request);
	if (status < 0)
		goto out;
	down_read(&nfsi->rwsem);
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		/* Yes: cache locks! */
		/* ...but avoid races with delegation recall... */
		request->fl_flags = fl_flags & ~FL_SLEEP;
		status = do_vfs_lock(request->fl_file, request);
		goto out_unlock;
	}
	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
	if (status != 0)
		goto out_unlock;
	/* Note: we always want to sleep here! */
	request->fl_flags = fl_flags | FL_SLEEP;
	if (do_vfs_lock(request->fl_file, request) < 0)
		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
out_unlock:
	up_read(&nfsi->rwsem);
out:
	request->fl_flags = fl_flags;
	return status;
}
|
|
|
|
|
|
|
|
/*
 * Exception-handling wrapper around _nfs4_proc_setlk.
 * NFS4ERR_DENIED is mapped to -EAGAIN so the caller's retry/poll loop
 * (nfs4_proc_lock) can handle a blocking SETLKW.
 */
static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_proc_setlk(state, cmd, request);
		if (err == -NFS4ERR_DENIED)
			err = -EAGAIN;
		err = nfs4_handle_exception(NFS_SERVER(state->inode),
				err, &exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Entry point for fcntl-style locking on NFSv4 files: dispatches GETLK,
 * unlock and SETLK/SETLKW requests.  A blocking SETLKW that is denied is
 * retried with an increasing backoff until granted or signalled.
 */
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
	int status;

	/* verify open state */
	ctx = nfs_file_open_context(filp);
	state = ctx->state;

	if (request->fl_start < 0 || request->fl_end < 0)
		return -EINVAL;

	if (IS_GETLK(cmd)) {
		if (state != NULL)
			return nfs4_proc_getlk(state, F_GETLK, request);
		/* no open state: nothing can conflict */
		return 0;
	}

	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
		return -EINVAL;

	if (request->fl_type == F_UNLCK) {
		if (state != NULL)
			return nfs4_proc_unlck(state, cmd, request);
		/* no open state: nothing to unlock */
		return 0;
	}

	if (state == NULL)
		return -ENOLCK;
	do {
		status = nfs4_proc_setlk(state, cmd, request);
		/* only SETLKW retries on -EAGAIN (lock denied) */
		if ((status != -EAGAIN) || IS_SETLK(cmd))
			break;
		timeout = nfs4_set_lock_task_retry(timeout);
		status = -ERESTARTSYS;
		if (signalled())
			break;
	} while(status < 0);
	return status;
}
|
|
|
|
|
2005-11-05 04:38:11 +08:00
|
|
|
/*
 * Push a lock that was cached under a delegation to the server, as part of
 * delegation recall.  Errors that a recovery thread can fix schedule the
 * appropriate recovery and return 0; NFS4ERR_DELAY retries via the
 * exception loop.  Case fallthroughs in the switch are intentional.
 */
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	err = nfs4_set_lock_state(state, fl);
	if (err != 0)
		goto out;
	do {
		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
		switch (err) {
		default:
			printk(KERN_ERR "%s: unhandled error %d.\n",
					__func__, err);
			/* fall through: treat like success and give up */
		case 0:
		case -ESTALE:
			goto out;
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
			nfs4_schedule_lease_recovery(server->nfs_client);
			goto out;
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
			nfs4_schedule_session_recovery(server->nfs_client->cl_session);
			goto out;
		case -ERESTARTSYS:
			/*
			 * The show must go on: exit, but mark the
			 * stateid as needing recovery.
			 */
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OPENMODE:
			nfs4_schedule_stateid_recovery(server, state);
			err = 0;
			goto out;
		case -EKEYEXPIRED:
			/*
			 * User RPCSEC_GSS context has expired.
			 * We cannot recover this stateid now, so
			 * skip it and allow recovery thread to
			 * proceed.
			 */
			err = 0;
			goto out;
		case -ENOMEM:
		case -NFS4ERR_DENIED:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			err = 0;
			goto out;
		case -NFS4ERR_DELAY:
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}
|
2005-06-23 01:16:22 +08:00
|
|
|
|
2010-07-02 00:49:01 +08:00
|
|
|
/* rpc_release for RELEASE_LOCKOWNER: free the kmalloc'd args. */
static void nfs4_release_lockowner_release(void *calldata)
{
	kfree(calldata);
}
|
|
|
|
|
|
|
|
/* Fire-and-forget RELEASE_LOCKOWNER: only a release callback is needed. */
const struct rpc_call_ops nfs4_release_lockowner_ops = {
	.rpc_release = nfs4_release_lockowner_release,
};
|
|
|
|
|
|
|
|
/*
 * Tell an NFSv4.0 server it may release state for a lock owner that is no
 * longer in use.  Sent asynchronously; allocation failure is silently
 * ignored since this is only a server-side resource hint.
 * RELEASE_LOCKOWNER does not exist in minor versions > 0, so it is
 * skipped there.
 */
void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
{
	struct nfs_server *server = lsp->ls_state->owner->so_server;
	struct nfs_release_lockowner_args *args;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
	};

	if (server->nfs_client->cl_mvops->minor_version != 0)
		return;
	args = kmalloc(sizeof(*args), GFP_NOFS);
	if (!args)
		return;
	args->lock_owner.clientid = server->nfs_client->cl_clientid;
	args->lock_owner.id = lsp->ls_id.id;
	args->lock_owner.s_dev = server->s_dev;
	msg.rpc_argp = args;
	/* args freed by nfs4_release_lockowner_release when the task ends */
	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
}
|
|
|
|
|
2005-06-23 01:16:22 +08:00
|
|
|
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
|
|
|
|
|
2010-12-09 19:35:25 +08:00
|
|
|
/*
 * xattr set handler for "system.nfs4_acl".  The handler framework strips
 * the prefix, so only the empty suffix is valid; anything else is -EINVAL.
 */
static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
				   const void *buf, size_t buflen,
				   int flags, int type)
{
	if (*key != '\0')
		return -EINVAL;

	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
}
|
|
|
|
|
2010-12-09 19:35:25 +08:00
|
|
|
/*
 * xattr get handler for "system.nfs4_acl".  Only the empty suffix (the
 * bare attribute name) is accepted; delegates to nfs4_proc_get_acl.
 */
static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
				   void *buf, size_t buflen, int type)
{
	if (*key != '\0')
		return -EINVAL;

	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
}
|
|
|
|
|
2010-12-09 19:35:25 +08:00
|
|
|
/*
 * xattr list handler: advertise "system.nfs4_acl" if the server supports
 * ACLs.  Returns the space needed (including the NUL); copies the name
 * into @list only when the remaining buffer is large enough, per the
 * listxattr two-pass convention.
 */
static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
				       size_t list_len, const char *name,
				       size_t name_len, int type)
{
	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);

	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
		return 0;

	if (list && len <= list_len)
		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
	return len;
}
|
|
|
|
|
2009-03-12 02:10:28 +08:00
|
|
|
/*
 * A referral reply only carries fileid, fsid and the referral flag.
 * Fill in plausible directory attributes (mode/type/nlink) so the
 * referral point can be instantiated as a directory inode.
 * Does nothing unless all three required attributes are present.
 */
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
{
	if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) &&
		(fattr->valid & NFS_ATTR_FATTR_FSID) &&
		(fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
		return;

	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
		NFS_ATTR_FATTR_NLINK;
	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
	fattr->nlink = 2;
}
|
|
|
|
|
2007-07-18 09:52:39 +08:00
|
|
|
/*
 * Fetch the fs_locations attribute for @name in @dir (used to follow
 * referrals and migrations).  @page receives the location list; the
 * returned fattr is patched up so the referral looks like a directory.
 * Returns the RPC status.
 */
int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
		struct nfs4_fs_locations *fs_locations, struct page *page)
{
	struct nfs_server *server = NFS_SERVER(dir);
	u32 bitmask[2] = {
		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
		[1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
	};
	struct nfs4_fs_locations_arg args = {
		.dir_fh = NFS_FH(dir),
		.name = name,
		.page = page,
		.bitmask = bitmask,
	};
	struct nfs4_fs_locations_res res = {
		.fs_locations = fs_locations,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("%s: start\n", __func__);
	nfs_fattr_init(&fs_locations->fattr);
	fs_locations->server = server;
	fs_locations->nlocations = 0;
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	nfs_fixup_referral_attributes(&fs_locations->fattr);
	dprintk("%s: returned status = %d\n", __func__, status);
	return status;
}
|
|
|
|
|
2011-03-25 01:12:29 +08:00
|
|
|
/*
 * One SECINFO attempt: ask the server which security flavors are valid
 * for @name under @dir.  Results land in @flavors.  Exception handling
 * is done by the nfs4_proc_secinfo wrapper.
 */
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
	int status;
	struct nfs4_secinfo_arg args = {
		.dir_fh = NFS_FH(dir),
		.name   = name,
	};
	struct nfs4_secinfo_res res = {
		.flavors     = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	dprintk("NFS call  secinfo %s\n", name->name);
	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("NFS reply  secinfo: %d\n", status);
	return status;
}
|
|
|
|
|
|
|
|
/* SECINFO with the standard retry-on-recoverable-error exception loop. */
int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = nfs4_handle_exception(NFS_SERVER(dir),
				_nfs4_proc_secinfo(dir, name, flavors),
				&exception);
	} while (exception.retry);
	return err;
}
|
|
|
|
|
2009-04-01 21:21:53 +08:00
|
|
|
#ifdef CONFIG_NFS_V4_1
|
2010-12-14 23:11:57 +08:00
|
|
|
/*
|
|
|
|
* Check the exchange flags returned by the server for invalid flags, having
|
|
|
|
* both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
|
|
|
|
* DS flags set.
|
|
|
|
*/
|
|
|
|
/*
 * Validate the exchange flags returned by EXCHANGE_ID.
 *
 * Invalid when: any bit outside the allowed reply mask is set, both the
 * PNFS-MDS and NON-PNFS roles are claimed at once, or none of the
 * NON_PNFS / PNFS / DS role bits is present.
 *
 * Returns NFS_OK on success, -NFS4ERR_INVAL otherwise.
 */
static int nfs4_check_cl_exchange_flags(u32 flags)
{
	if (flags & ~EXCHGID4_FLAG_MASK_R)
		return -NFS4ERR_INVAL;
	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
		return -NFS4ERR_INVAL;
	if (!(flags & EXCHGID4_FLAG_MASK_PNFS))
		return -NFS4ERR_INVAL;
	return NFS_OK;
}
|
|
|
|
|
2009-04-01 21:22:29 +08:00
|
|
|
/*
|
|
|
|
* nfs4_proc_exchange_id()
|
|
|
|
*
|
|
|
|
* Since the clientid has expired, all compounds using sessions
|
|
|
|
* associated with the stale clientid will be returning
|
|
|
|
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
|
|
|
|
* be in some phase of session reset.
|
|
|
|
*/
|
2009-12-05 04:52:24 +08:00
|
|
|
/*
 * nfs4_proc_exchange_id()
 *
 * Establish (or re-establish) the client ID with an NFSv4.1 server.
 * The verifier is built from the client boot time so the server can tell
 * a rebooted client from a network partition; the owner string is derived
 * from the client address, node/domain name and auth flavor so it is
 * stable across reboots.
 *
 * Since the clientid has expired, all compounds using sessions
 * associated with the stale clientid will be returning
 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
 * be in some phase of session reset.
 */
int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
{
	nfs4_verifier verifier;
	struct nfs41_exchange_id_args args = {
		.client = clp,
		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
	};
	struct nfs41_exchange_id_res res = {
		.client = clp,
	};
	int status;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	__be32 *p;

	dprintk("--> %s\n", __func__);
	BUG_ON(clp == NULL);

	p = (u32 *)verifier.data;
	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
	args.verifier = &verifier;

	args.id_len = scnprintf(args.id, sizeof(args.id),
				"%s/%s.%s/%u",
				clp->cl_ipaddr,
				init_utsname()->nodename,
				init_utsname()->domainname,
				clp->cl_rpcclient->cl_auth->au_flavor);

	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
	if (!status)
		/* reply decoded into clp; sanity-check the role flags */
		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
	dprintk("<-- %s status= %d\n", __func__, status);
	return status;
}
|
|
|
|
|
2009-04-01 21:22:30 +08:00
|
|
|
/* Calldata bundling args/results for the async GET_LEASE_TIME call. */
struct nfs4_get_lease_time_data {
	struct nfs4_get_lease_time_args *args;
	struct nfs4_get_lease_time_res *res;
	struct nfs_client *clp;
};
|
|
|
|
|
|
|
|
/*
 * Prepare the GET_LEASE_TIME task.  Runs privileged because it is issued
 * from within session establishment; the sequence slot must be available
 * at this point, hence the BUG_ON.
 */
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
					void *calldata)
{
	int ret;
	struct nfs4_get_lease_time_data *data =
			(struct nfs4_get_lease_time_data *)calldata;

	dprintk("--> %s\n", __func__);
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	/* just setup sequence, do not trigger session recovery
	   since we're invoked within one */
	ret = nfs41_setup_sequence(data->clp->cl_session,
				   &data->args->la_seq_args,
				   &data->res->lr_seq_res, 0, task);

	BUG_ON(ret == -EAGAIN);
	rpc_call_start(task);
	dprintk("<-- %s\n", __func__);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called from nfs4_state_manager thread for session setup, so don't recover
|
|
|
|
* from sequence operation or clientid errors.
|
|
|
|
*/
|
|
|
|
/*
 * .rpc_call_done for GET_LEASE_TIME.  Called from the nfs4_state_manager
 * thread during session setup, so it must not recover from sequence or
 * clientid errors; DELAY/GRACE are simply retried after a short pause.
 */
static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_get_lease_time_data *data =
			(struct nfs4_get_lease_time_data *)calldata;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
		return;	/* sequence layer rescheduled the task */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
		rpc_delay(task, NFS4_POLL_RETRY_MIN);
		task->tk_status = 0;
		nfs_restart_rpc(task, data->clp);
		return;
	}
	/* any other status (including 0) is returned to the caller as-is */
	dprintk("<-- %s\n", __func__);
}
|
|
|
|
|
|
|
|
/* Callback table for the async GET_LEASE_TIME task; no .rpc_release since
 * the calldata lives on the caller's stack.
 * NOTE(review): not static/const — presumably only used in this file;
 * confirm before narrowing linkage. */
struct rpc_call_ops nfs4_get_lease_time_ops = {
	.rpc_call_prepare = nfs4_get_lease_time_prepare,
	.rpc_call_done = nfs4_get_lease_time_done,
};
|
|
|
|
|
|
|
|
/*
 * nfs4_proc_get_lease_time - fetch the server lease time into @fsinfo.
 *
 * Runs GET_LEASE_TIME as an rpc task (so the prepare/done callbacks above
 * can manage session slots) but waits synchronously for the result.
 * Returns 0 or a negative errno.
 */
int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
{
	struct rpc_task *task;
	struct nfs4_get_lease_time_args args;
	struct nfs4_get_lease_time_res res = {
		.lr_fsinfo = fsinfo,
	};
	/* all calldata is stack-allocated: rpc_put_task below reaps the task
	 * before this frame unwinds */
	struct nfs4_get_lease_time_data data = {
		.args = &args,
		.res = &res,
		.clp = clp,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_get_lease_time_ops,
		.callback_data = &data
	};
	int status;

	dprintk("--> %s\n", __func__);
	task = rpc_run_task(&task_setup);

	if (IS_ERR(task))
		status = PTR_ERR(task);
	else {
		status = task->tk_status;
		rpc_put_task(task);
	}
	dprintk("<-- %s return %d\n", __func__, status);

	return status;
}
|
|
|
|
|
2009-04-01 21:23:31 +08:00
|
|
|
/*
|
|
|
|
* Reset a slot table
|
|
|
|
*/
|
2010-01-15 06:45:10 +08:00
|
|
|
/*
 * Reset a slot table to @max_reqs slots, setting every slot's sequence
 * number to @ivalue.  If the negotiated size changed, the old slot array
 * is replaced by a freshly kmalloc'ed one.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): the old array is kfree'd before slot_tbl_lock is taken and
 * tbl->slots is only repointed under the lock — presumably the caller
 * (session reset path) guarantees no concurrent slot users; confirm.
 */
static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
				 int ivalue)
{
	struct nfs4_slot *new = NULL;
	int i;
	int ret = 0;

	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
		max_reqs, tbl->max_slots);

	/* Does the newly negotiated max_reqs match the existing slot table? */
	if (max_reqs != tbl->max_slots) {
		ret = -ENOMEM;
		/* new slots are not zeroed; seq_nr is set in the loop below */
		new = kmalloc(max_reqs * sizeof(struct nfs4_slot),
			      GFP_NOFS);
		if (!new)
			goto out;
		ret = 0;
		kfree(tbl->slots);
	}
	spin_lock(&tbl->slot_tbl_lock);
	if (new) {
		tbl->slots = new;
		tbl->max_slots = max_reqs;
	}
	for (i = 0; i < tbl->max_slots; ++i)
		tbl->slots[i].seq_nr = ivalue;
	spin_unlock(&tbl->slot_tbl_lock);
	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
		tbl, tbl->slots, tbl->max_slots);
out:
	dprintk("<-- %s: return %d\n", __func__, ret);
	return ret;
}
|
|
|
|
|
2009-04-01 21:23:31 +08:00
|
|
|
/*
|
|
|
|
* Reset the forechannel and backchannel slot tables
|
|
|
|
*/
|
|
|
|
static int nfs4_reset_slot_tables(struct nfs4_session *session)
|
|
|
|
{
|
|
|
|
int status;
|
|
|
|
|
|
|
|
status = nfs4_reset_slot_table(&session->fc_slot_table,
|
2010-01-15 06:45:10 +08:00
|
|
|
session->fc_attrs.max_reqs, 1);
|
2009-04-01 21:23:33 +08:00
|
|
|
if (status)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
status = nfs4_reset_slot_table(&session->bc_slot_table,
|
2010-01-15 06:45:10 +08:00
|
|
|
session->bc_attrs.max_reqs, 0);
|
2009-04-01 21:23:31 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2009-04-01 21:23:33 +08:00
|
|
|
/* Destroy the slot table */
|
|
|
|
static void nfs4_destroy_slot_tables(struct nfs4_session *session)
|
|
|
|
{
|
|
|
|
if (session->fc_slot_table.slots != NULL) {
|
|
|
|
kfree(session->fc_slot_table.slots);
|
|
|
|
session->fc_slot_table.slots = NULL;
|
|
|
|
}
|
|
|
|
if (session->bc_slot_table.slots != NULL) {
|
|
|
|
kfree(session->bc_slot_table.slots);
|
|
|
|
session->bc_slot_table.slots = NULL;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2009-04-01 21:22:31 +08:00
|
|
|
/*
|
|
|
|
* Initialize slot table
|
|
|
|
*/
|
2009-04-01 21:23:32 +08:00
|
|
|
static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
|
|
|
|
int max_slots, int ivalue)
|
2009-04-01 21:22:31 +08:00
|
|
|
{
|
|
|
|
struct nfs4_slot *slot;
|
|
|
|
int ret = -ENOMEM;
|
|
|
|
|
|
|
|
BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
|
|
|
|
|
2009-04-01 21:23:32 +08:00
|
|
|
dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
|
2009-04-01 21:22:31 +08:00
|
|
|
|
2010-05-14 00:51:01 +08:00
|
|
|
slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_NOFS);
|
2009-04-01 21:22:31 +08:00
|
|
|
if (!slot)
|
|
|
|
goto out;
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
spin_lock(&tbl->slot_tbl_lock);
|
|
|
|
tbl->max_slots = max_slots;
|
|
|
|
tbl->slots = slot;
|
|
|
|
tbl->highest_used_slotid = -1; /* no slot is currently used */
|
|
|
|
spin_unlock(&tbl->slot_tbl_lock);
|
|
|
|
dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
|
|
|
|
tbl, tbl->slots, tbl->max_slots);
|
|
|
|
out:
|
|
|
|
dprintk("<-- %s: return %d\n", __func__, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-04-01 21:23:32 +08:00
|
|
|
/*
|
|
|
|
* Initialize the forechannel and backchannel tables
|
|
|
|
*/
|
|
|
|
/*
 * Initialize the forechannel and backchannel slot tables, skipping any
 * table that already has slots (session re-establishment).  On a
 * backchannel failure both tables are torn down so the session is left
 * in a consistent "no slots" state.  Returns 0 or a negative errno.
 */
static int nfs4_init_slot_tables(struct nfs4_session *session)
{
	struct nfs4_slot_table *tbl;
	int status = 0;

	tbl = &session->fc_slot_table;
	if (tbl->slots == NULL) {
		status = nfs4_init_slot_table(tbl,
				session->fc_attrs.max_reqs, 1);
		if (status)
			return status;
	}

	tbl = &session->bc_slot_table;
	if (tbl->slots == NULL) {
		status = nfs4_init_slot_table(tbl,
				session->bc_attrs.max_reqs, 0);
		if (status)
			nfs4_destroy_slot_tables(session);
	}

	return status;
}
|
|
|
|
|
|
|
|
/*
 * nfs4_alloc_session - allocate and minimally initialize an NFSv4.1
 * session for @clp.  Slot arrays are NOT allocated here; that happens in
 * nfs4_init_slot_tables() after CREATE_SESSION negotiates the sizes.
 * Returns the new session, or NULL on allocation failure.
 */
struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	/* fore channel: uses a priority wait queue for slot waiters */
	tbl = &session->fc_slot_table;
	tbl->highest_used_slotid = -1;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
	init_completion(&tbl->complete);

	/* back channel: plain wait queue */
	tbl = &session->bc_slot_table;
	tbl->highest_used_slotid = -1;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
	init_completion(&tbl->complete);

	/* mark the session as awaiting first-time initialization */
	session->session_state = 1<<NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}
|
|
|
|
|
|
|
|
/*
 * nfs4_destroy_session - tear down a session: DESTROY_SESSION on the wire,
 * release backchannel resources on the transport, free slot tables, and
 * free the session itself.
 */
void nfs4_destroy_session(struct nfs4_session *session)
{
	nfs4_proc_destroy_session(session);
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, session->clp->cl_rpcclient->cl_xprt);
	xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
				NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_slot_tables(session);
	kfree(session);
}
|
|
|
|
|
2009-04-01 21:22:31 +08:00
|
|
|
/*
|
|
|
|
* Initialize the values to be used by the client in CREATE_SESSION
|
|
|
|
* If nfs4_init_session set the fore channel request and response sizes,
|
|
|
|
* use them.
|
|
|
|
*
|
|
|
|
* Set the back channel max_resp_sz_cached to zero to force the client to
|
|
|
|
* always set csa_cachethis to FALSE because the current implementation
|
|
|
|
* of the back channel DRC only supports caching the CB_SEQUENCE operation.
|
|
|
|
*/
|
|
|
|
/*
 * Initialize the values to be used by the client in CREATE_SESSION
 * If nfs4_init_session set the fore channel request and response sizes,
 * use them.
 *
 * Set the back channel max_resp_sz_cached to zero to force the client to
 * always set csa_cachethis to FALSE because the current implementation
 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
 */
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
	struct nfs4_session *session = args->client->cl_session;
	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
		     mxresp_sz = session->fc_attrs.max_resp_sz;

	/* fall back to the largest I/O size if nfs4_init_session didn't run */
	if (mxrqst_sz == 0)
		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
	if (mxresp_sz == 0)
		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
	/* Fore channel attributes */
	args->fc_attrs.headerpadsz = 0;
	args->fc_attrs.max_rqst_sz = mxrqst_sz;
	args->fc_attrs.max_resp_sz = mxresp_sz;
	args->fc_attrs.max_ops = NFS4_MAX_OPS;
	/* one request per transport slot */
	args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;

	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_ops=%u max_reqs=%u\n",
		__func__,
		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

	/* Back channel attributes */
	args->bc_attrs.headerpadsz = 0;
	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz_cached = 0;
	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	args->bc_attrs.max_reqs = 1;

	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
		__func__,
		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
		args->bc_attrs.max_reqs);
}
|
|
|
|
|
2010-10-03 03:19:01 +08:00
|
|
|
/*
 * Validate the fore-channel attributes the server returned against what we
 * requested.  The server may shrink sizes but must not grow them, and must
 * honor our minimum max_ops.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
	struct nfs4_channel_attrs *sent = &args->fc_attrs;
	struct nfs4_channel_attrs *rcvd = &session->fc_attrs;

	if (rcvd->headerpadsz > sent->headerpadsz)
		return -EINVAL;
	if (rcvd->max_resp_sz > sent->max_resp_sz)
		return -EINVAL;
	/*
	 * Our requested max_ops is the minimum we need; we're not
	 * prepared to break up compounds into smaller pieces than that.
	 * So, no point even trying to continue if the server won't
	 * cooperate:
	 */
	if (rcvd->max_ops < sent->max_ops)
		return -EINVAL;
	if (rcvd->max_reqs == 0)
		return -EINVAL;
	return 0;
}
|
|
|
|
|
2010-10-03 03:19:01 +08:00
|
|
|
/*
 * Validate the back-channel attributes the server returned.  Note the
 * asymmetry with the fore channel: the server must not request MORE than
 * we offered (rqst size, cached responses) and must not shrink the
 * response size we promised.  Returns 0 if acceptable, -EINVAL otherwise.
 */
static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
	struct nfs4_channel_attrs *sent = &args->bc_attrs;
	struct nfs4_channel_attrs *rcvd = &session->bc_attrs;

	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
		return -EINVAL;
	if (rcvd->max_resp_sz < sent->max_resp_sz)
		return -EINVAL;
	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
		return -EINVAL;
	/* These would render the backchannel useless: */
	if (rcvd->max_ops == 0)
		return -EINVAL;
	if (rcvd->max_reqs == 0)
		return -EINVAL;
	return 0;
}
|
2009-04-01 21:22:32 +08:00
|
|
|
|
|
|
|
/*
 * Validate both negotiated channel attribute sets.  Returns the first
 * failing verifier's status, or 0 when both channels are acceptable.
 */
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
				     struct nfs4_session *session)
{
	int status = nfs4_verify_fore_channel_attrs(args, session);

	if (status != 0)
		return status;
	return nfs4_verify_back_channel_attrs(args, session);
}
|
|
|
|
|
2009-04-01 21:22:31 +08:00
|
|
|
/*
 * Single synchronous CREATE_SESSION attempt.  On success the server's
 * negotiated channel attributes are validated and the clientid sequence
 * id is bumped.  Returns 0 or a negative errno/NFS4ERR code (the caller
 * handles NFS4ERR_DELAY retries).
 */
static int _nfs4_proc_create_session(struct nfs_client *clp)
{
	struct nfs4_session *session = clp->cl_session;
	struct nfs41_create_session_args args = {
		.client = clp,
		.cb_program = NFS4_CALLBACK,
	};
	struct nfs41_create_session_res res = {
		.client = clp,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	nfs4_init_channel_attrs(&args);
	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);

	if (!status)
		/* Verify the session's negotiated channel_attrs values */
		status = nfs4_verify_channel_attrs(&args, session);
	if (!status) {
		/* Increment the clientid slot sequence id */
		clp->cl_seqid++;
	}

	return status;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Issues a CREATE_SESSION operation to the server.
|
|
|
|
* It is the responsibility of the caller to verify the session is
|
|
|
|
* expired before calling this routine.
|
|
|
|
*/
|
2009-12-06 08:32:11 +08:00
|
|
|
/*
 * nfs4_proc_create_session - establish the session with the server.
 *
 * Retries CREATE_SESSION while the server returns NFS4ERR_DELAY (with
 * exponential-style backoff via nfs4_delay), then initializes and resets
 * the slot tables to the negotiated sizes.  It is the responsibility of
 * the caller to verify the session is expired before calling this routine.
 * Returns 0 or a negative errno.
 */
int nfs4_proc_create_session(struct nfs_client *clp)
{
	int status;
	unsigned *ptr;
	struct nfs4_session *session = clp->cl_session;
	long timeout = 0;
	int err;

	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);

	do {
		status = _nfs4_proc_create_session(clp);
		if (status == -NFS4ERR_DELAY) {
			/* a signal while sleeping aborts the retry loop */
			err = nfs4_delay(clp->cl_rpcclient, &timeout);
			if (err)
				status = err;
		}
	} while (status == -NFS4ERR_DELAY);

	if (status)
		goto out;

	/* Init and reset the fore channel */
	status = nfs4_init_slot_tables(session);
	dprintk("slot table initialization returned %d\n", status);
	if (status)
		goto out;
	status = nfs4_reset_slot_tables(session);
	dprintk("slot table reset returned %d\n", status);
	if (status)
		goto out;

	ptr = (unsigned *)&session->sess_id.data[0];
	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
out:
	dprintk("<-- %s\n", __func__);
	return status;
}
|
|
|
|
|
2009-04-01 21:22:34 +08:00
|
|
|
/*
|
|
|
|
* Issue the over-the-wire RPC DESTROY_SESSION.
|
|
|
|
* The caller must serialize access to this routine.
|
|
|
|
*/
|
|
|
|
int nfs4_proc_destroy_session(struct nfs4_session *session)
|
|
|
|
{
|
|
|
|
int status = 0;
|
|
|
|
struct rpc_message msg;
|
|
|
|
|
|
|
|
dprintk("--> nfs4_proc_destroy_session\n");
|
|
|
|
|
|
|
|
/* session is still being setup */
|
|
|
|
if (session->clp->cl_cons_state != NFS_CS_READY)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
|
|
|
|
msg.rpc_argp = session;
|
|
|
|
msg.rpc_resp = NULL;
|
|
|
|
msg.rpc_cred = NULL;
|
|
|
|
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
|
|
|
|
|
|
|
|
if (status)
|
|
|
|
printk(KERN_WARNING
|
|
|
|
"Got error %d from the server on DESTROY_SESSION. "
|
|
|
|
"Session has been destroyed regardless...\n", status);
|
|
|
|
|
|
|
|
dprintk("<-- nfs4_proc_destroy_session\n");
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2009-07-22 04:48:07 +08:00
|
|
|
/*
 * nfs4_init_session - first-time (MDS) session setup for @server.
 *
 * Records the desired fore-channel request/response sizes (derived from
 * the mount's rsize/wsize plus protocol overhead) before the lease
 * recovery path performs CREATE_SESSION.  The INITING bit guarantees this
 * runs only once per session.  Returns 0 or a negative errno.
 */
int nfs4_init_session(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_session *session;
	unsigned int rsize, wsize;
	int ret;

	if (!nfs4_has_session(clp))
		return 0;	/* not an NFSv4.1 mount */

	session = clp->cl_session;
	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
		return 0;	/* already initialized */

	rsize = server->rsize;
	if (rsize == 0)
		rsize = NFS_MAX_FILE_IO_SIZE;
	wsize = server->wsize;
	if (wsize == 0)
		wsize = NFS_MAX_FILE_IO_SIZE;

	session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
	session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;

	ret = nfs4_recover_expired_lease(server);
	if (!ret)
		ret = nfs4_check_client_ready(clp);
	return ret;
}
|
|
|
|
|
2011-03-01 09:34:17 +08:00
|
|
|
int nfs4_init_ds_session(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
struct nfs4_session *session = clp->cl_session;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = nfs4_client_recover_expired_lease(clp);
|
|
|
|
if (!ret)
|
|
|
|
/* Test for the DS role */
|
|
|
|
if (!is_ds_client(clp))
|
|
|
|
ret = -ENODEV;
|
|
|
|
if (!ret)
|
|
|
|
ret = nfs4_check_client_ready(clp);
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
|
|
|
|
|
|
|
|
|
2009-04-01 21:22:36 +08:00
|
|
|
/*
|
|
|
|
* Renew the cl_session lease.
|
|
|
|
*/
|
2010-06-16 21:52:25 +08:00
|
|
|
/* Heap-allocated calldata for the async lease-renewal SEQUENCE rpc;
 * freed (and the clp reference dropped) in nfs41_sequence_release(). */
struct nfs4_sequence_data {
	struct nfs_client *clp;
	struct nfs4_sequence_args args;
	struct nfs4_sequence_res res;
};
|
|
|
|
|
2010-02-05 19:45:04 +08:00
|
|
|
/*
 * .rpc_release for the renewal SEQUENCE task: reschedule the next renewal
 * (only if the client is still in use elsewhere), drop the reference taken
 * in _nfs41_proc_sequence(), and free the calldata.
 */
static void nfs41_sequence_release(void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(calldata);
}
|
|
|
|
|
2010-06-16 21:52:25 +08:00
|
|
|
/*
 * Map a failed renewal SEQUENCE status to an action: NFS4ERR_DELAY gets
 * a delayed retry (-EAGAIN tells the caller to restart the task); any
 * other error schedules full lease recovery and returns 0.
 */
static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}
|
|
|
|
|
2010-02-05 19:45:04 +08:00
|
|
|
/*
 * .rpc_call_done for the renewal SEQUENCE task.  Retries via the error
 * handler when appropriate; skips error handling entirely when we hold
 * the last reference to the client (it is being torn down).
 */
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
		return;	/* sequence layer rescheduled the task */

	if (task->tk_status < 0) {
		dprintk("%s ERROR %d\n", __func__, task->tk_status);
		/* client is going away: no point recovering */
		if (atomic_read(&clp->cl_count) == 1)
			goto out;

		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
	dprintk("<-- %s\n", __func__);
}
|
|
|
|
|
|
|
|
static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
|
|
|
|
{
|
2010-06-16 21:52:25 +08:00
|
|
|
struct nfs4_sequence_data *calldata = data;
|
|
|
|
struct nfs_client *clp = calldata->clp;
|
2009-04-01 21:22:36 +08:00
|
|
|
struct nfs4_sequence_args *args;
|
|
|
|
struct nfs4_sequence_res *res;
|
|
|
|
|
|
|
|
args = task->tk_msg.rpc_argp;
|
|
|
|
res = task->tk_msg.rpc_resp;
|
|
|
|
|
2010-06-16 21:52:26 +08:00
|
|
|
if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task))
|
2009-04-01 21:22:36 +08:00
|
|
|
return;
|
|
|
|
rpc_call_start(task);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Callback table for the async lease-renewal SEQUENCE task. */
static const struct rpc_call_ops nfs41_sequence_ops = {
	.rpc_call_done = nfs41_sequence_call_done,
	.rpc_call_prepare = nfs41_sequence_prepare,
	.rpc_release = nfs41_sequence_release,
};
|
|
|
|
|
2010-06-16 21:52:26 +08:00
|
|
|
/*
 * Launch an async SEQUENCE rpc to renew the cl_session lease.  Takes a
 * reference on @clp that is dropped in nfs41_sequence_release().
 * Returns the running task, or ERR_PTR(-EIO/-ENOMEM) on failure.
 */
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_sequence_data *calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs41_sequence_ops,
		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
	};

	/* refuse to renew if the client is already being destroyed */
	if (!atomic_inc_not_zero(&clp->cl_count))
		return ERR_PTR(-EIO);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL) {
		nfs_put_client(clp);
		return ERR_PTR(-ENOMEM);
	}
	msg.rpc_argp = &calldata->args;
	msg.rpc_resp = &calldata->res;
	calldata->clp = clp;
	task_setup_data.callback_data = calldata;

	return rpc_run_task(&task_setup_data);
}
|
|
|
|
|
|
|
|
/*
 * Fire-and-forget lease renewal: start the SEQUENCE task and release our
 * task reference asynchronously.  Returns 0, or the task-creation error.
 */
static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_task *task = _nfs41_proc_sequence(clp, cred);
	int ret = 0;

	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else
		rpc_put_task_async(task);
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Synchronous lease renewal: run the SEQUENCE task, wait for completion,
 * and act on any server status flags (e.g. pending callbacks / recovery
 * hints) reported in the reply.  Returns 0 or a negative errno.
 */
static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_task *task;
	int ret;

	task = _nfs41_proc_sequence(clp, cred);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	ret = rpc_wait_for_completion_task(task);
	if (!ret) {
		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;

		if (task->tk_status == 0)
			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
		ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}
|
|
|
|
|
2009-12-06 05:08:41 +08:00
|
|
|
/* Heap-allocated calldata for the async RECLAIM_COMPLETE rpc; freed in
 * nfs4_free_reclaim_complete_data(). */
struct nfs4_reclaim_complete_data {
	struct nfs_client *clp;
	struct nfs41_reclaim_complete_args arg;
	struct nfs41_reclaim_complete_res res;
};
|
|
|
|
|
|
|
|
/*
 * .rpc_call_prepare for RECLAIM_COMPLETE: runs privileged because it is
 * issued from the state manager during recovery; reserves a session slot
 * and starts the call.
 */
static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	if (nfs41_setup_sequence(calldata->clp->cl_session,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, 0, task))
		return;	/* no slot yet; sequence layer requeues the task */

	rpc_call_start(task);
}
|
|
|
|
|
2010-06-16 21:52:25 +08:00
|
|
|
/*
 * Map a RECLAIM_COMPLETE status to an action.  COMPLETE_ALREADY and
 * WRONG_CRED are treated as success; DELAY requests a retry (-EAGAIN);
 * anything else schedules lease recovery.  Returns 0 or -EAGAIN.
 */
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case 0:
	case -NFS4ERR_COMPLETE_ALREADY:
	case -NFS4ERR_WRONG_CRED: /* What to do here? */
		break;
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}
|
|
|
|
|
2009-12-06 05:08:41 +08:00
|
|
|
/*
 * .rpc_call_done for RECLAIM_COMPLETE: completes sequence processing and
 * restarts the task when the error handler asks for a retry.
 */
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_res *res = &calldata->res.seq_res;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, res))
		return;	/* sequence layer rescheduled the task */

	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}
|
|
|
|
|
|
|
|
/* .rpc_release for RECLAIM_COMPLETE: free the heap-allocated calldata. */
static void nfs4_free_reclaim_complete_data(void *data)
{
	kfree((struct nfs4_reclaim_complete_data *)data);
}
|
|
|
|
|
|
|
|
/* Callback table for the async RECLAIM_COMPLETE task. */
static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
	.rpc_call_done = nfs4_reclaim_complete_done,
	.rpc_release = nfs4_free_reclaim_complete_data,
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Issue a global reclaim complete.
|
|
|
|
*/
|
|
|
|
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
|
|
|
|
{
|
|
|
|
struct nfs4_reclaim_complete_data *calldata;
|
|
|
|
struct rpc_task *task;
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
|
|
|
|
};
|
|
|
|
struct rpc_task_setup task_setup_data = {
|
|
|
|
.rpc_client = clp->cl_rpcclient,
|
|
|
|
.rpc_message = &msg,
|
|
|
|
.callback_ops = &nfs4_reclaim_complete_call_ops,
|
|
|
|
.flags = RPC_TASK_ASYNC,
|
|
|
|
};
|
|
|
|
int status = -ENOMEM;
|
|
|
|
|
|
|
|
dprintk("--> %s\n", __func__);
|
2010-05-14 00:51:01 +08:00
|
|
|
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
|
2009-12-06 05:08:41 +08:00
|
|
|
if (calldata == NULL)
|
|
|
|
goto out;
|
|
|
|
calldata->clp = clp;
|
|
|
|
calldata->arg.one_fs = 0;
|
|
|
|
|
|
|
|
msg.rpc_argp = &calldata->arg;
|
|
|
|
msg.rpc_resp = &calldata->res;
|
|
|
|
task_setup_data.callback_data = calldata;
|
|
|
|
task = rpc_run_task(&task_setup_data);
|
2010-04-22 17:28:39 +08:00
|
|
|
if (IS_ERR(task)) {
|
2009-12-06 05:08:41 +08:00
|
|
|
status = PTR_ERR(task);
|
2010-04-22 17:28:39 +08:00
|
|
|
goto out;
|
|
|
|
}
|
2011-03-10 02:13:46 +08:00
|
|
|
status = nfs4_wait_for_completion_rpc_task(task);
|
|
|
|
if (status == 0)
|
|
|
|
status = task->tk_status;
|
2009-12-06 05:08:41 +08:00
|
|
|
rpc_put_task(task);
|
2010-04-22 17:28:39 +08:00
|
|
|
return 0;
|
2009-12-06 05:08:41 +08:00
|
|
|
out:
|
|
|
|
dprintk("<-- %s status=%d\n", __func__, status);
|
|
|
|
return status;
|
|
|
|
}
|
2010-10-20 12:18:03 +08:00
|
|
|
|
|
|
|
/*
 * .rpc_call_prepare for LAYOUTGET: reserve a session slot, pick the
 * layout stateid, and start the call.  If pnfs_choose_layoutget_stateid()
 * declines (nonzero), the task exits successfully without going on the
 * wire.
 */
static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);
	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
	 * right now covering the LAYOUTGET we are about to send.
	 * However, that is not so catastrophic, and there seems
	 * to be no way to prevent it completely.
	 */
	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
				&lgp->res.seq_res, 0, task))
		return;	/* no slot yet; sequence layer requeues the task */
	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
					  NFS_I(lgp->args.inode)->layout,
					  lgp->args.ctx->state)) {
		rpc_exit(task, NFS4_OK);
		return;
	}
	rpc_call_start(task);
}
|
|
|
|
|
|
|
|
/*
 * .rpc_call_done for LAYOUTGET.  LAYOUTTRYLATER and RECALLCONFLICT are
 * retryable: they are rewritten to NFS4ERR_DELAY and passed (along with
 * any other error) to the generic async error handler, which may restart
 * the task.
 */
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lgp->res.seq_res))
		return;	/* sequence layer rescheduled the task */

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		task->tk_status = -NFS4ERR_DELAY;
		/* Fall through */
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("<-- %s\n", __func__);
}
|
|
|
|
|
|
|
|
/*
 * .rpc_release for LAYOUTGET: drop the open context reference held by the
 * args and free the request (the caller of nfs4_proc_layoutget hands
 * ownership of lgp to the rpc layer).
 */
static void nfs4_layoutget_release(void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;

	dprintk("--> %s\n", __func__);
	put_nfs_open_context(lgp->args.ctx);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}
|
|
|
|
|
|
|
|
/* Callback table for the async LAYOUTGET task. */
static const struct rpc_call_ops nfs4_layoutget_call_ops = {
	.rpc_call_prepare = nfs4_layoutget_prepare,
	.rpc_call_done = nfs4_layoutget_done,
	.rpc_release = nfs4_layoutget_release,
};
|
|
|
|
|
|
|
|
/*
 * nfs4_proc_layoutget - run LAYOUTGET and, on success, hand the returned
 * layout to pnfs_layout_process().  The task owns @lgp after rpc_run_task
 * (nfs4_layoutget_release frees it).  Returns 0 or a negative errno.
 */
int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
{
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
		.rpc_argp = &lgp->args,
		.rpc_resp = &lgp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutget_call_ops,
		.callback_data = lgp,
		.flags = RPC_TASK_ASYNC,
	};
	int status = 0;

	dprintk("--> %s\n", __func__);

	/* the reply decodes directly into the args' layout buffer */
	lgp->res.layoutp = &lgp->args.layout;
	lgp->res.seq_res.sr_slot = NULL;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	if (status == 0)
		status = pnfs_layout_process(lgp);
	rpc_put_task(task);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}
|
|
|
|
|
|
|
|
/*
 * One synchronous GETDEVICEINFO round trip for @pdev.
 * Retryable errors (e.g. session/lease issues) are handled by the
 * caller via nfs4_handle_exception().
 */
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
	};
	/* The reply is decoded straight into the same pnfs_device. */
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}
|
|
|
|
|
|
|
|
int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
|
|
|
|
{
|
|
|
|
struct nfs4_exception exception = { };
|
|
|
|
int err;
|
|
|
|
|
|
|
|
do {
|
|
|
|
err = nfs4_handle_exception(server,
|
|
|
|
_nfs4_proc_getdeviceinfo(server, pdev),
|
|
|
|
&exception);
|
|
|
|
} while (exception.retry);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
|
|
|
|
|
2011-03-23 21:27:54 +08:00
|
|
|
static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutcommit_data *data = calldata;
|
|
|
|
struct nfs_server *server = NFS_SERVER(data->args.inode);
|
|
|
|
|
|
|
|
if (nfs4_setup_sequence(server, &data->args.seq_args,
|
|
|
|
&data->res.seq_res, 1, task))
|
|
|
|
return;
|
|
|
|
rpc_call_start(task);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
|
|
|
|
{
|
|
|
|
struct nfs4_layoutcommit_data *data = calldata;
|
|
|
|
struct nfs_server *server = NFS_SERVER(data->args.inode);
|
|
|
|
|
|
|
|
if (!nfs4_sequence_done(task, &data->res.seq_res))
|
|
|
|
return;
|
|
|
|
|
|
|
|
switch (task->tk_status) { /* Just ignore these failures */
|
|
|
|
case NFS4ERR_DELEG_REVOKED: /* layout was recalled */
|
|
|
|
case NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
|
|
|
|
case NFS4ERR_BADLAYOUT: /* no layout */
|
|
|
|
case NFS4ERR_GRACE: /* loca_recalim always false */
|
|
|
|
task->tk_status = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
|
|
|
|
nfs_restart_rpc(task, server->nfs_client);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (task->tk_status == 0)
|
|
|
|
nfs_post_op_update_inode_force_wcc(data->args.inode,
|
|
|
|
data->res.fattr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ->rpc_release for LAYOUTCOMMIT: drop the layout-segment and
 * credential references taken when the commit was set up, then free
 * the call data.
 */
static void nfs4_layoutcommit_release(void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;

	/* Matched by references in pnfs_set_layoutcommit */
	put_lseg(data->lseg);
	put_rpccred(data->cred);
	kfree(data);
}
|
|
|
|
|
|
|
|
/* RPC callback operations driving the asynchronous LAYOUTCOMMIT call. */
static const struct rpc_call_ops nfs4_layoutcommit_ops = {
	.rpc_call_prepare = nfs4_layoutcommit_prepare,
	.rpc_call_done = nfs4_layoutcommit_done,
	.rpc_release = nfs4_layoutcommit_release,
};
|
|
|
|
|
|
|
|
int
|
2011-03-12 15:58:10 +08:00
|
|
|
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
|
2011-03-23 21:27:54 +08:00
|
|
|
{
|
|
|
|
struct rpc_message msg = {
|
|
|
|
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
|
|
|
|
.rpc_argp = &data->args,
|
|
|
|
.rpc_resp = &data->res,
|
|
|
|
.rpc_cred = data->cred,
|
|
|
|
};
|
|
|
|
struct rpc_task_setup task_setup_data = {
|
|
|
|
.task = &data->task,
|
|
|
|
.rpc_client = NFS_CLIENT(data->args.inode),
|
|
|
|
.rpc_message = &msg,
|
|
|
|
.callback_ops = &nfs4_layoutcommit_ops,
|
|
|
|
.callback_data = data,
|
|
|
|
.flags = RPC_TASK_ASYNC,
|
|
|
|
};
|
|
|
|
struct rpc_task *task;
|
|
|
|
int status = 0;
|
|
|
|
|
|
|
|
dprintk("NFS: %4d initiating layoutcommit call. sync %d "
|
|
|
|
"lbw: %llu inode %lu\n",
|
|
|
|
data->task.tk_pid, sync,
|
|
|
|
data->args.lastbytewritten,
|
|
|
|
data->args.inode->i_ino);
|
|
|
|
|
|
|
|
task = rpc_run_task(&task_setup_data);
|
|
|
|
if (IS_ERR(task))
|
|
|
|
return PTR_ERR(task);
|
2011-03-12 15:58:10 +08:00
|
|
|
if (sync == false)
|
2011-03-23 21:27:54 +08:00
|
|
|
goto out;
|
|
|
|
status = nfs4_wait_for_completion_rpc_task(task);
|
|
|
|
if (status != 0)
|
|
|
|
goto out;
|
|
|
|
status = task->tk_status;
|
|
|
|
out:
|
|
|
|
dprintk("%s: status %d\n", __func__, status);
|
|
|
|
rpc_put_task(task);
|
|
|
|
return status;
|
|
|
|
}
|
2009-04-01 21:21:53 +08:00
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
2009-04-01 21:22:47 +08:00
|
|
|
/* NFSv4.0 state recovery after a server reboot (reclaim during grace). */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
	.recover_open	= nfs4_open_reclaim,
	.recover_lock	= nfs4_lock_reclaim,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred	= nfs4_get_setclientid_cred,
};
|
|
|
|
|
2009-04-01 21:22:47 +08:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
|
|
|
|
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
|
|
|
|
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
|
|
|
|
.recover_open = nfs4_open_reclaim,
|
|
|
|
.recover_lock = nfs4_lock_reclaim,
|
2009-12-05 04:52:24 +08:00
|
|
|
.establish_clid = nfs41_init_clientid,
|
2009-04-01 21:22:49 +08:00
|
|
|
.get_clid_cred = nfs4_get_exchange_id_cred,
|
2009-12-06 05:08:41 +08:00
|
|
|
.reclaim_complete = nfs41_proc_reclaim_complete,
|
2009-04-01 21:22:47 +08:00
|
|
|
};
|
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
|
|
|
|
|
|
|
/* NFSv4.0 recovery of expired state outside the grace period. */
struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
	.recover_open	= nfs4_open_expired,
	.recover_lock	= nfs4_lock_expired,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred	= nfs4_get_setclientid_cred,
};
|
|
|
|
|
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
|
2008-12-24 04:21:43 +08:00
|
|
|
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
|
2008-12-24 04:21:41 +08:00
|
|
|
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
|
2005-04-17 06:20:36 +08:00
|
|
|
.recover_open = nfs4_open_expired,
|
|
|
|
.recover_lock = nfs4_lock_expired,
|
2009-12-05 04:52:24 +08:00
|
|
|
.establish_clid = nfs41_init_clientid,
|
2009-04-01 21:22:49 +08:00
|
|
|
.get_clid_cred = nfs4_get_exchange_id_cred,
|
2005-04-17 06:20:36 +08:00
|
|
|
};
|
2009-04-01 21:22:47 +08:00
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-04-01 21:22:44 +08:00
|
|
|
/* NFSv4.0 lease maintenance: renew the lease with the RENEW operation. */
struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
	.sched_state_renewal = nfs4_proc_async_renew,
	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
	.renew_lease = nfs4_proc_renew,
};
|
|
|
|
|
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
|
|
|
|
.sched_state_renewal = nfs41_proc_async_sequence,
|
2009-04-01 21:22:46 +08:00
|
|
|
.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
|
2009-04-01 21:22:45 +08:00
|
|
|
.renew_lease = nfs4_proc_sequence,
|
2009-04-01 21:22:44 +08:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2010-06-16 21:52:26 +08:00
|
|
|
/* Minor-version dispatch table for NFSv4.0. */
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version = 0,
	.call_sync = _nfs4_call_sync,
	.validate_stateid = nfs4_validate_delegation_stateid,
	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
	.state_renewal_ops = &nfs40_state_renewal_ops,
};
|
|
|
|
|
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
|
|
|
|
.minor_version = 1,
|
|
|
|
.call_sync = _nfs4_call_sync_session,
|
2010-06-16 21:52:27 +08:00
|
|
|
.validate_stateid = nfs41_validate_delegation_stateid,
|
2010-06-16 21:52:27 +08:00
|
|
|
.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
|
|
|
|
.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
|
|
|
|
.state_renewal_ops = &nfs41_state_renewal_ops,
|
2010-06-16 21:52:26 +08:00
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Indexed by NFSv4 minor version number. */
const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
};
|
|
|
|
|
2007-02-12 16:55:39 +08:00
|
|
|
/* Inode operations for NFSv4 regular files; xattrs are handled by the
 * generic layer via nfs4_xattr_handlers (ACL support). */
static const struct inode_operations nfs4_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.getxattr	= generic_getxattr,
	.setxattr	= generic_setxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
};
|
|
|
|
|
2006-08-23 08:06:11 +08:00
|
|
|
/*
 * The NFSv4 client RPC operations vector: maps the generic NFS client
 * interface onto the NFSv4 procedure implementations in this file.
 */
const struct nfs_rpc_ops nfs_v4_clientops = {
	.version	= 4,			/* protocol version */
	.dentry_ops	= &nfs4_dentry_operations,
	.dir_inode_ops	= &nfs4_dir_inode_operations,
	.file_inode_ops	= &nfs4_file_inode_operations,
	.getroot	= nfs4_proc_get_root,
	.getattr	= nfs4_proc_getattr,
	.setattr	= nfs4_proc_setattr,
	.lookupfh	= nfs4_proc_lookupfh,
	.lookup		= nfs4_proc_lookup,
	.access		= nfs4_proc_access,
	.readlink	= nfs4_proc_readlink,
	.create		= nfs4_proc_create,
	.remove		= nfs4_proc_remove,
	.unlink_setup	= nfs4_proc_unlink_setup,
	.unlink_done	= nfs4_proc_unlink_done,
	.rename		= nfs4_proc_rename,
	.rename_setup	= nfs4_proc_rename_setup,
	.rename_done	= nfs4_proc_rename_done,
	.link		= nfs4_proc_link,
	.symlink	= nfs4_proc_symlink,
	.mkdir		= nfs4_proc_mkdir,
	.rmdir		= nfs4_proc_remove,	/* REMOVE covers directories too */
	.readdir	= nfs4_proc_readdir,
	.mknod		= nfs4_proc_mknod,
	.statfs		= nfs4_proc_statfs,
	.fsinfo		= nfs4_proc_fsinfo,
	.pathconf	= nfs4_proc_pathconf,
	.set_capabilities = nfs4_server_capabilities,
	.decode_dirent	= nfs4_decode_dirent,
	.read_setup	= nfs4_proc_read_setup,
	.read_done	= nfs4_read_done,
	.write_setup	= nfs4_proc_write_setup,
	.write_done	= nfs4_write_done,
	.commit_setup	= nfs4_proc_commit_setup,
	.commit_done	= nfs4_commit_done,
	.lock		= nfs4_proc_lock,
	.clear_acl_cache = nfs4_zap_acl_attr,
	.close_context	= nfs4_close_context,
	.open_context	= nfs4_atomic_open,
	.init_client	= nfs4_init_client,
	.secinfo	= nfs4_proc_secinfo,
};
|
|
|
|
|
2010-12-09 19:35:25 +08:00
|
|
|
/* xattr handler exposing the NFSv4 ACL as the "system.nfs4_acl" attribute. */
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
	.prefix	= XATTR_NAME_NFSV4_ACL,
	.list	= nfs4_xattr_list_nfs4_acl,
	.get	= nfs4_xattr_get_nfs4_acl,
	.set	= nfs4_xattr_set_nfs4_acl,
};
|
|
|
|
|
|
|
|
/* NULL-terminated table of xattr handlers used by the generic xattr code. */
const struct xattr_handler *nfs4_xattr_handlers[] = {
	&nfs4_xattr_nfs4_acl_handler,
	NULL
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Local variables:
|
|
|
|
* c-basic-offset: 8
|
|
|
|
* End:
|
|
|
|
*/
|