NFS client updates for Linux 4.10
Merge tag 'nfs-for-4.10-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
 "Highlights include:

  Stable bugfixes:
   - Fix a pnfs deadlock between read resends and layoutreturn
   - Don't invalidate the layout stateid while a layout return is
     outstanding
   - Don't schedule a layoutreturn if the layout stateid is marked as
     invalid
   - On a pNFS error, do not send LAYOUTGET until the LAYOUTRETURN is
     complete
   - SUNRPC: fix refcounting problems with auth_gss messages

  Features:
   - Add client support for the NFSv4 umask attribute
   - NFSv4: Correct support for flock() stateids
   - Add a LAYOUTRETURN operation to CLOSE and DELEGRETURN when
     return-on-close is specified
   - Allow the pNFS/flexfiles layoutstat information to piggyback on
     LAYOUTRETURN
   - Optimise away redundant GETATTR calls when doing state recovery
     and/or when not required by cache revalidation rules or
     close-to-open cache consistency
   - Attribute cache improvements
   - RPC/RDMA support for SG_GAP devices

  Bugfixes:
   - NFS: Fix performance regressions in readdir
   - pNFS/flexfiles: Fix a deadlock on LAYOUTGET
   - NFSv4: Add missing nfs_put_lock_context()
   - NFSv4.1: Fix regression in callback retry handling
   - Fix false positive NFSv4.0 trunking detection
   - pNFS/flexfiles: Only send layoutstats updates for mirrors that
     were updated
   - Various layout stateid related bugfixes
   - RPC/RDMA bugfixes"

* tag 'nfs-for-4.10-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (82 commits)
  SUNRPC: fix refcounting problems with auth_gss messages.
  nfs: add support for the umask attribute
  pNFS/flexfiles: Ensure we have enough buffer for layoutreturn
  pNFS/flexfiles: Remove a redundant parameter in ff_layout_encode_ioerr()
  pNFS/flexfiles: Fix a deadlock on LAYOUTGET
  pNFS: Layoutreturn must free the layout after the layout-private data
  pNFS/flexfiles: Fix ff_layout_add_ds_error_locked()
  NFSv4: Add missing nfs_put_lock_context()
  pNFS: Release NFS_LAYOUT_RETURN when invalidating the layout stateid
  NFSv4.1: Don't schedule lease recovery in nfs4_schedule_session_recovery()
  NFSv4.1: Handle NFS4ERR_BADSESSION/NFS4ERR_DEADSESSION replies to OP_SEQUENCE
  NFS: Only look at the change attribute cache state in nfs_check_verifier
  NFS: Fix incorrect size revalidation when holding a delegation
  NFS: Fix incorrect mapping revalidation when holding a delegation
  pNFS/flexfiles: Support sending layoutstats in layoutreturn
  pNFS/flexfiles: Minor refactoring before adding iostats to layoutreturn
  NFS: Fix up read of mirror stats
  pNFS/flexfiles: Clean up layoutstats
  pNFS/flexfiles: Refactor encoding of the layoutreturn payload
  pNFS: Add a layoutreturn callback to perform layout-private setup
  ...
commit 73e2e0c9b1
@@ -110,20 +110,52 @@ out:
 #if defined(CONFIG_NFS_V4_1)

 /*
- * Lookup a layout by filehandle.
+ * Lookup a layout inode by stateid
  *
- * Note: gets a refcount on the layout hdr and on its respective inode.
- * Caller must put the layout hdr and the inode.
- *
- * TODO: keep track of all layouts (and delegations) in a hash table
- * hashed by filehandle.
+ * Note: returns a refcount on the inode and superblock
  */
-static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
-		struct nfs_fh *fh)
+static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
+		const nfs4_stateid *stateid)
 {
 	struct nfs_server *server;
+	struct inode *inode;
 	struct pnfs_layout_hdr *lo;

 restart:
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 		list_for_each_entry(lo, &server->layouts, plh_layouts) {
+			if (stateid != NULL &&
+			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
+				continue;
+			inode = igrab(lo->plh_inode);
+			if (!inode)
+				continue;
+			if (!nfs_sb_active(inode->i_sb)) {
+				rcu_read_lock();
+				spin_unlock(&clp->cl_lock);
+				iput(inode);
+				spin_lock(&clp->cl_lock);
+				goto restart;
+			}
+			return inode;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Lookup a layout inode by filehandle.
+ *
+ * Note: returns a refcount on the inode and superblock
+ *
+ */
+static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
+		const struct nfs_fh *fh)
+{
+	struct nfs_server *server;
 	struct nfs_inode *nfsi;
-	struct inode *ino;
+	struct inode *inode;
 	struct pnfs_layout_hdr *lo;

 restart:
@@ -134,37 +166,38 @@ restart:
 				continue;
 			if (nfsi->layout != lo)
 				continue;
-			ino = igrab(lo->plh_inode);
-			if (!ino)
-				break;
-			spin_lock(&ino->i_lock);
-			/* Is this layout in the process of being freed? */
-			if (nfsi->layout != lo) {
-				spin_unlock(&ino->i_lock);
-				iput(ino);
+			inode = igrab(lo->plh_inode);
+			if (!inode)
+				continue;
+			if (!nfs_sb_active(inode->i_sb)) {
+				rcu_read_lock();
+				spin_unlock(&clp->cl_lock);
+				iput(inode);
+				spin_lock(&clp->cl_lock);
 				goto restart;
 			}
-			pnfs_get_layout_hdr(lo);
-			spin_unlock(&ino->i_lock);
-			return lo;
+			return inode;
 		}
 	}

 	return NULL;
 }

-static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
-		struct nfs_fh *fh)
+static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
+		const struct nfs_fh *fh,
+		const nfs4_stateid *stateid)
 {
-	struct pnfs_layout_hdr *lo;
+	struct inode *inode;

 	spin_lock(&clp->cl_lock);
-	lo = get_layout_by_fh_locked(clp, fh);
+	rcu_read_lock();
+	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
+	if (!inode)
+		inode = nfs_layout_find_inode_by_fh(clp, fh);
+	rcu_read_unlock();
 	spin_unlock(&clp->cl_lock);

-	return lo;
+	return inode;
 }

 /*
@@ -213,18 +246,20 @@ static u32 initiate_file_draining(struct nfs_client *clp,
 	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
 	LIST_HEAD(free_me_list);

-	lo = get_layout_by_fh(clp, &args->cbl_fh);
-	if (!lo) {
-		trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, NULL,
-				&args->cbl_stateid, -rv);
+	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
+	if (!ino)
 		goto out;
-	}

-	ino = lo->plh_inode;
 	pnfs_layoutcommit_inode(ino, false);


 	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	if (!lo) {
+		spin_unlock(&ino->i_lock);
+		goto out;
+	}
+	pnfs_get_layout_hdr(lo);
 	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
 	if (rv != NFS_OK)
 		goto unlock;
@@ -258,10 +293,10 @@ unlock:
 	/* Free all lsegs that are attached to commit buckets */
 	nfs_commit_inode(ino, 0);
 	pnfs_put_layout_hdr(lo);
-out:
 	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
 			&args->cbl_stateid, -rv);
-	iput(ino);
+out:
+	nfs_iput_and_deactive(ino);
 	return rv;
 }
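The two lookup helpers above share a retry pattern worth noting: a reference is taken on the inode while cl_lock is held, but pinning the superblock can fail during teardown, in which case the lock is dropped, the reference is released, and the scan restarts. A rough userspace analogue of that dance, using illustrative names (node, lookup_get) rather than kernel APIs:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stddef.h>

	struct node {
		struct node *next;
		atomic_int refcount;
		int dying;	/* teardown in progress, like a deactivating superblock */
		int key;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head;

	static struct node *lookup_get(int key)
	{
	restart:
		pthread_mutex_lock(&list_lock);
		for (struct node *n = head; n != NULL; n = n->next) {
			if (n->key != key)
				continue;
			atomic_fetch_add(&n->refcount, 1);
			if (n->dying) {
				/* Mirrors the !nfs_sb_active() path above: drop
				 * the list lock before releasing the reference,
				 * then rescan from the top. */
				pthread_mutex_unlock(&list_lock);
				atomic_fetch_sub(&n->refcount, 1);
				goto restart;
			}
			pthread_mutex_unlock(&list_lock);
			return n;	/* caller owns one reference */
		}
		pthread_mutex_unlock(&list_lock);
		return NULL;
	}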
@@ -369,9 +369,7 @@ nfs_found_client(const struct nfs_client_initdata *cl_init,
  * Look up a client by IP address and protocol version
  * - creates a new record if one doesn't yet exist
  */
-struct nfs_client *
-nfs_get_client(const struct nfs_client_initdata *cl_init,
-	       rpc_authflavor_t authflavour)
+struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
 {
 	struct nfs_client *clp, *new = NULL;
 	struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
@@ -655,7 +653,7 @@ static int nfs_init_server(struct nfs_server *server,
 		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);

 	/* Allocate or find a client reference we can use */
-	clp = nfs_get_client(&cl_init, RPC_AUTH_UNIX);
+	clp = nfs_get_client(&cl_init);
 	if (IS_ERR(clp)) {
 		dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
 		return PTR_ERR(clp);
@@ -391,10 +391,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;

-	/* Ensure we revalidate the attributes and page cache! */
-	spin_lock(&inode->i_lock);
-	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
-	spin_unlock(&inode->i_lock);
 	trace_nfs4_set_delegation(inode, res->delegation_type);

 out:
fs/nfs/dir.c

@@ -455,14 +455,17 @@ bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx)
 }

 /*
- * This function is called by the lookup code to request the use of
- * readdirplus to accelerate any future lookups in the same
+ * This function is called by the lookup and getattr code to request the
+ * use of readdirplus to accelerate any future lookups in the same
  * directory.
  */
-static
 void nfs_advise_use_readdirplus(struct inode *dir)
 {
-	set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
+	struct nfs_inode *nfsi = NFS_I(dir);
+
+	if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
+	    !list_empty(&nfsi->open_files))
+		set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
 }

 /*
@@ -475,9 +478,12 @@ void nfs_advise_use_readdirplus(struct inode *dir)
  */
 void nfs_force_use_readdirplus(struct inode *dir)
 {
-	if (!list_empty(&NFS_I(dir)->open_files)) {
-		nfs_advise_use_readdirplus(dir);
-		nfs_zap_mapping(dir, dir->i_mapping);
+	struct nfs_inode *nfsi = NFS_I(dir);
+
+	if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
+	    !list_empty(&nfsi->open_files)) {
+		set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
+		invalidate_mapping_pages(dir->i_mapping, 0, -1);
 	}
 }

@@ -886,17 +892,6 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
 	goto out;
 }

-static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
-{
-	struct nfs_inode *nfsi = NFS_I(dir);
-
-	if (nfs_attribute_cache_expired(dir))
-		return true;
-	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-		return true;
-	return false;
-}
-
 /* The file offset position represents the dirent entry number.  A
    last cookie cache takes care of the common case of reading the
    whole directory.
@@ -928,7 +923,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
 	desc->decode = NFS_PROTO(inode)->decode_dirent;
 	desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;

-	if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
+	if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
 		res = nfs_revalidate_mapping(inode, file->f_mapping);
 	if (res < 0)
 		goto out;
@@ -1035,8 +1030,6 @@ EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate);
 static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
 			      int rcu_walk)
 {
-	int ret;
-
 	if (IS_ROOT(dentry))
 		return 1;
 	if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
@@ -1044,12 +1037,12 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
 	if (!nfs_verify_change_attribute(dir, dentry->d_time))
 		return 0;
 	/* Revalidate nfsi->cache_change_attribute before we declare a match */
-	if (rcu_walk)
-		ret = nfs_revalidate_inode_rcu(NFS_SERVER(dir), dir);
-	else
-		ret = nfs_revalidate_inode(NFS_SERVER(dir), dir);
-	if (ret < 0)
-		return 0;
+	if (nfs_mapping_need_revalidate_inode(dir)) {
+		if (rcu_walk)
+			return 0;
+		if (__nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
+			return 0;
+	}
 	if (!nfs_verify_change_attribute(dir, dentry->d_time))
 		return 0;
 	return 1;
@@ -1161,7 +1154,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
 				return -ECHILD;
 			goto out_bad;
 		}
-		goto out_valid_noent;
+		goto out_valid;
 	}

 	if (is_bad_inode(inode)) {
@@ -1184,6 +1177,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
 				return -ECHILD;
 			goto out_zap_parent;
 		}
+		nfs_advise_use_readdirplus(dir);
 		goto out_valid;
 	}

@@ -1219,12 +1213,12 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
 	nfs_free_fhandle(fhandle);
 	nfs4_label_free(label);

+	/* set a readdirplus hint that we had a cache miss */
+	nfs_force_use_readdirplus(dir);
+
 out_set_verifier:
 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
 out_valid:
-	/* Success: notify readdir to use READDIRPLUS */
-	nfs_advise_use_readdirplus(dir);
-out_valid_noent:
 	if (flags & LOOKUP_RCU) {
 		if (parent != ACCESS_ONCE(dentry->d_parent))
 			return -ECHILD;
@@ -1424,8 +1418,8 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
 	if (IS_ERR(res))
 		goto out_label;

-	/* Success: notify readdir to use READDIRPLUS */
-	nfs_advise_use_readdirplus(dir);
+	/* Notify readdir to use READDIRPLUS */
+	nfs_force_use_readdirplus(dir);

 no_entry:
 	res = d_splice_alias(inode, dentry);
@@ -1467,9 +1461,9 @@ static fmode_t flags_to_mode(int flags)
 	return res;
 }

-static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags)
+static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
 {
-	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags));
+	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
 }

 static int do_open(struct inode *inode, struct file *filp)
@@ -1535,8 +1529,13 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 		return -ENAMETOOLONG;

 	if (open_flags & O_CREAT) {
+		struct nfs_server *server = NFS_SERVER(dir);
+
+		if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
+			mode &= ~current_umask();
+
 		attr.ia_valid |= ATTR_MODE;
-		attr.ia_mode = mode & ~current_umask();
+		attr.ia_mode = mode;
 	}
 	if (open_flags & O_TRUNC) {
 		attr.ia_valid |= ATTR_SIZE;
@@ -1554,7 +1553,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 		return finish_no_open(file, dentry);
 	}

-	ctx = create_nfs_open_context(dentry, open_flags);
+	ctx = create_nfs_open_context(dentry, open_flags, file);
 	err = PTR_ERR(ctx);
 	if (IS_ERR(ctx))
 		goto out;
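The O_CREAT hunk in nfs_atomic_open() above is the client half of the umask feature from the changelog: when the server advertises the NFSv4.2 mode_umask attribute, the client sends the unmasked mode and lets the server apply the umask (so the server can ignore it when a default ACL applies); otherwise it masks locally as before. A minimal sketch of just that decision — the bit value of FATTR4_WORD2_MODE_UMASK is assumed here for illustration:

	#include <sys/types.h>

	#define FATTR4_WORD2_MODE_UMASK (1U << 17)	/* assumed bit position */

	static mode_t nfs_open_create_mode(mode_t mode, mode_t umask,
					   unsigned int server_attr_bitmask2)
	{
		/* A umask-aware server applies the umask itself; the client
		 * only masks for older servers. */
		if (!(server_attr_bitmask2 & FATTR4_WORD2_MODE_UMASK))
			mode &= ~umask;
		return mode;
	}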
@@ -105,7 +105,7 @@ struct nfs_direct_req {

 static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
 static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
-static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
 static void nfs_direct_write_schedule_work(struct work_struct *work);

 static inline void get_dreq(struct nfs_direct_req *dreq)
@@ -684,7 +684,7 @@ out_failed:
 	}

 	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, dreq->inode);
+		nfs_direct_write_complete(dreq);
 }

 static void nfs_direct_commit_complete(struct nfs_commit_data *data)
@@ -717,7 +717,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 	}

 	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
-		nfs_direct_write_complete(dreq, data->inode);
+		nfs_direct_write_complete(dreq);
 }

 static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
@@ -768,7 +768,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
 	}
 }

-static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
 {
 	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
 }
@@ -824,7 +824,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)

 out_put:
 	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, hdr->inode);
+		nfs_direct_write_complete(dreq);
 	hdr->release(hdr);
 }

@@ -953,7 +953,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 	}

 	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, dreq->inode);
+		nfs_direct_write_complete(dreq);
 	return 0;
 }
@@ -102,8 +102,11 @@ static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs_inode *nfsi = NFS_I(inode);
+	const unsigned long force_reval = NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
+	unsigned long cache_validity = nfsi->cache_validity;

-	if (nfs_have_delegated_attributes(inode))
+	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) &&
+	    (cache_validity & force_reval) != force_reval)
 		goto out_noreval;

 	if (filp->f_flags & O_DIRECT)
@@ -279,8 +279,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)

 	nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
 			     dataserver_retrans, 4,
-			     s->nfs_client->cl_minorversion,
-			     s->nfs_client->cl_rpcclient->cl_auth->au_flavor);
+			     s->nfs_client->cl_minorversion);

 out_test_devid:
 	if (filelayout_test_devid_unavailable(devid))
@@ -25,9 +25,20 @@
 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD

 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
+#define FF_LAYOUTRETURN_MAXERR 20
+

 static struct group_info	*ff_zero_group;

+static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
+		struct nfs_pgio_header *hdr);
+static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
+			       struct nfs42_layoutstat_devinfo *devinfo,
+			       int dev_limit);
+static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
+		const struct nfs42_layoutstat_devinfo *devinfo,
+		struct nfs4_ff_layout_mirror *mirror);
+
 static struct pnfs_layout_hdr *
 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
 {
@@ -172,7 +183,7 @@ ff_layout_add_mirror(struct pnfs_layout_hdr *lo,

 	spin_lock(&inode->i_lock);
 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
-		if (mirror->mirror_ds != pos->mirror_ds)
+		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
 			continue;
 		if (!ff_mirror_match_fh(mirror, pos))
 			continue;
@@ -349,19 +360,6 @@ static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
 	}
 }

-static void ff_layout_mark_devices_valid(struct nfs4_ff_layout_segment *fls)
-{
-	struct nfs4_deviceid_node *node;
-	int i;
-
-	if (!(fls->flags & FF_FLAGS_NO_IO_THRU_MDS))
-		return;
-	for (i = 0; i < fls->mirror_array_cnt; i++) {
-		node = &fls->mirror_array[i]->mirror_ds->id_node;
-		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
-	}
-}
-
 static struct pnfs_layout_segment *
 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 		     struct nfs4_layoutget_res *lgr,
@@ -415,8 +413,6 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,

 	for (i = 0; i < fls->mirror_array_cnt; i++) {
 		struct nfs4_ff_layout_mirror *mirror;
-		struct nfs4_deviceid devid;
-		struct nfs4_deviceid_node *idnode;
 		struct auth_cred acred = { .group_info = ff_zero_group };
 		struct rpc_cred	__rcu *cred;
 		u32 ds_count, fh_count, id;
@@ -441,24 +437,10 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 		fls->mirror_array[i]->ds_count = ds_count;

 		/* deviceid */
-		rc = decode_deviceid(&stream, &devid);
+		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
 		if (rc)
 			goto out_err_free;

-		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
-						&devid, lh->plh_lc_cred,
-						gfp_flags);
-		/*
-		 * upon success, mirror_ds is allocated by previous
-		 * getdeviceinfo, or newly by .alloc_deviceid_node
-		 * nfs4_find_get_deviceid failure is indeed getdeviceinfo falure
-		 */
-		if (idnode)
-			fls->mirror_array[i]->mirror_ds =
-				FF_LAYOUT_MIRROR_DS(idnode);
-		else
-			goto out_err_free;
-
 		/* efficiency */
 		rc = -EIO;
 		p = xdr_inline_decode(&stream, 4);
@@ -556,8 +538,6 @@ out_sort_mirrors:
 	rc = ff_layout_check_layout(lgr);
 	if (rc)
 		goto out_err_free;
-	ff_layout_mark_devices_valid(fls);
-
 	ret = &fls->generic_hdr;
 	dprintk("<-- %s (success)\n", __func__);
 out_free_page:
@@ -702,6 +682,7 @@ nfs4_ff_layout_stat_io_start_read(struct inode *inode,
 	spin_lock(&mirror->lock);
 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
+	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 	spin_unlock(&mirror->lock);

 	if (report)
@@ -718,6 +699,7 @@ nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
 			requested, completed,
 			ktime_get(), task->tk_start);
+	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 	spin_unlock(&mirror->lock);
 }

@@ -731,6 +713,7 @@ nfs4_ff_layout_stat_io_start_write(struct inode *inode,
 	spin_lock(&mirror->lock);
 	report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now);
 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
+	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 	spin_unlock(&mirror->lock);

 	if (report)
@@ -750,6 +733,7 @@ nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
 	spin_lock(&mirror->lock);
 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
 			requested, completed, ktime_get(), task->tk_start);
+	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 	spin_unlock(&mirror->lock);
 }

@@ -1293,6 +1277,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
 					hdr->pgio_mirror_idx + 1,
 					&hdr->pgio_mirror_idx))
 			goto out_eagain;
+		ff_layout_read_record_layoutstats_done(task, hdr);
 		pnfs_read_resend_pnfs(hdr);
 		return task->tk_status;
 	case -NFS4ERR_RESET_TO_MDS:
@@ -1961,38 +1946,88 @@ ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
 			id_node));
 }

-static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
-				  struct xdr_stream *xdr,
-				  const struct nfs4_layoutreturn_args *args)
+static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
+			      const struct nfs4_layoutreturn_args *args,
+			      const struct nfs4_flexfile_layoutreturn_args *ff_args)
 {
-	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
 	__be32 *start;
-	int count = 0, ret = 0;

 	start = xdr_reserve_space(xdr, 4);
 	if (unlikely(!start))
 		return -E2BIG;

+	*start = cpu_to_be32(ff_args->num_errors);
 	/* This assume we always return _ALL_ layouts */
-	spin_lock(&hdr->plh_inode->i_lock);
-	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
-	spin_unlock(&hdr->plh_inode->i_lock);
-
-	*start = cpu_to_be32(count);
-
-	return ret;
+	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
 }

-/* report nothing for now */
-static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
-				     struct xdr_stream *xdr,
-				     const struct nfs4_layoutreturn_args *args)
+static void
+encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, len);
+	xdr_encode_opaque_fixed(p, buf, len);
+}
+
+static void
+ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
+		const nfs4_stateid *stateid,
+		const struct nfs42_layoutstat_devinfo *devinfo)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 8 + 8);
+	p = xdr_encode_hyper(p, devinfo->offset);
+	p = xdr_encode_hyper(p, devinfo->length);
+	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
+	p = xdr_reserve_space(xdr, 4*8);
+	p = xdr_encode_hyper(p, devinfo->read_count);
+	p = xdr_encode_hyper(p, devinfo->read_bytes);
+	p = xdr_encode_hyper(p, devinfo->write_count);
+	p = xdr_encode_hyper(p, devinfo->write_bytes);
+	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
+}
+
+static void
+ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
+		const nfs4_stateid *stateid,
+		const struct nfs42_layoutstat_devinfo *devinfo)
+{
+	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
+	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
+			devinfo->ld_private.data);
+}
+
+/* report nothing for now */
+static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
+		const struct nfs4_layoutreturn_args *args,
+		struct nfs4_flexfile_layoutreturn_args *ff_args)
 {
 	__be32 *p;
+	int i;

 	p = xdr_reserve_space(xdr, 4);
-	if (likely(p))
-		*p = cpu_to_be32(0);
+	*p = cpu_to_be32(ff_args->num_dev);
+	for (i = 0; i < ff_args->num_dev; i++)
+		ff_layout_encode_ff_iostat(xdr,
+				&args->layout->plh_stateid,
+				&ff_args->devinfo[i]);
+}
+
+static void
+ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
+		unsigned int num_entries)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_entries; i++) {
+		if (!devinfo[i].ld_private.ops)
+			continue;
+		if (!devinfo[i].ld_private.ops->free)
+			continue;
+		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
+	}
 }

 static struct nfs4_deviceid_node *
@@ -2008,24 +2043,91 @@ ff_layout_alloc_deviceid_node(struct nfs_server *server,
 }

 static void
-ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
-		struct xdr_stream *xdr,
-		const struct nfs4_layoutreturn_args *args)
+ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
+		const void *voidargs,
+		const struct nfs4_xdr_opaque_data *ff_opaque)
 {
-	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
+	const struct nfs4_layoutreturn_args *args = voidargs;
+	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
+	struct xdr_buf tmp_buf = {
+		.head = {
+			[0] = {
+				.iov_base = page_address(ff_args->pages[0]),
+			},
+		},
+		.buflen = PAGE_SIZE,
+	};
+	struct xdr_stream tmp_xdr;
 	__be32 *start;

 	dprintk("%s: Begin\n", __func__);
+
+	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);
+
+	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
+	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
+
 	start = xdr_reserve_space(xdr, 4);
 	BUG_ON(!start);
+	*start = cpu_to_be32(tmp_buf.len);
+	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

-	ff_layout_encode_ioerr(flo, xdr, args);
-	ff_layout_encode_iostats(flo, xdr, args);
-
-	*start = cpu_to_be32((xdr->p - start - 1) * 4);
 	dprintk("%s: Return\n", __func__);
 }

+static void
+ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
+{
+	struct nfs4_flexfile_layoutreturn_args *ff_args;
+
+	if (!args->data)
+		return;
+	ff_args = args->data;
+	args->data = NULL;
+
+	ff_layout_free_ds_ioerr(&ff_args->errors);
+	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
+
+	put_page(ff_args->pages[0]);
+	kfree(ff_args);
+}
+
+const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
+	.encode = ff_layout_encode_layoutreturn,
+	.free = ff_layout_free_layoutreturn,
+};
+
+static int
+ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
+{
+	struct nfs4_flexfile_layoutreturn_args *ff_args;
+	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
+
+	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
+	if (!ff_args)
+		goto out_nomem;
+	ff_args->pages[0] = alloc_page(GFP_KERNEL);
+	if (!ff_args->pages[0])
+		goto out_nomem_free;
+
+	INIT_LIST_HEAD(&ff_args->errors);
+	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
+			&args->range, &ff_args->errors,
+			FF_LAYOUTRETURN_MAXERR);
+
+	spin_lock(&args->inode->i_lock);
+	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
+			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
+	spin_unlock(&args->inode->i_lock);
+
+	args->ld_private->ops = &layoutreturn_ops;
+	args->ld_private->data = ff_args;
+	return 0;
+out_nomem_free:
+	kfree(ff_args);
+out_nomem:
+	return -ENOMEM;
+}
+
 static int
 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
 {
@@ -2146,21 +2248,18 @@ ff_layout_encode_io_latency(struct xdr_stream *xdr,
 }

 static void
-ff_layout_encode_layoutstats(struct xdr_stream *xdr,
-			     struct nfs42_layoutstat_args *args,
-			     struct nfs42_layoutstat_devinfo *devinfo)
+ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
+			      const struct nfs42_layoutstat_devinfo *devinfo,
+			      struct nfs4_ff_layout_mirror *mirror)
 {
-	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
 	struct nfs4_pnfs_ds_addr *da;
 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
 	struct nfs_fh *fh = &mirror->fh_versions[0];
-	__be32 *p, *start;
+	__be32 *p;

 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
 	dprintk("%s: DS %s: encoding address %s\n",
 		__func__, ds->ds_remotestr, da->da_remotestr);
-	/* layoutupdate length */
-	start = xdr_reserve_space(xdr, 4);
 	/* netaddr4 */
 	ff_layout_encode_netaddr(xdr, da);
 	/* nfs_fh4 */
@@ -2177,42 +2276,71 @@ ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
 	/* bool */
 	p = xdr_reserve_space(xdr, 4);
 	*p = cpu_to_be32(false);
+}
+
+static void
+ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
+		const struct nfs4_xdr_opaque_data *opaque)
+{
+	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
+			struct nfs42_layoutstat_devinfo, ld_private);
+	__be32 *start;
+
+	/* layoutupdate length */
+	start = xdr_reserve_space(xdr, 4);
+	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
 }

+static void
+ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
+{
+	struct nfs4_ff_layout_mirror *mirror = opaque->data;
+
+	ff_layout_put_mirror(mirror);
+}
+
+static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
+	.encode = ff_layout_encode_layoutstats,
+	.free = ff_layout_free_layoutstats,
+};
+
 static int
-ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
-			       struct pnfs_layout_hdr *lo,
+ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
+			       struct nfs42_layoutstat_devinfo *devinfo,
 			       int dev_limit)
 {
 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
 	struct nfs4_ff_layout_mirror *mirror;
 	struct nfs4_deviceid_node *dev;
-	struct nfs42_layoutstat_devinfo *devinfo;
 	int i = 0;

 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
 		if (i >= dev_limit)
 			break;
-		if (!mirror->mirror_ds)
+		if (IS_ERR_OR_NULL(mirror->mirror_ds))
+			continue;
+		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
 			continue;
 		/* mirror refcount put in cleanup_layoutstats */
 		if (!atomic_inc_not_zero(&mirror->ref))
 			continue;
 		dev = &mirror->mirror_ds->id_node;
-		devinfo = &args->devinfo[i];
 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
 		devinfo->offset = 0;
 		devinfo->length = NFS4_MAX_UINT64;
+		spin_lock(&mirror->lock);
 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
+		spin_unlock(&mirror->lock);
 		devinfo->layout_type = LAYOUT_FLEX_FILES;
-		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
-		devinfo->layout_private = mirror;
+		devinfo->ld_private.ops = &layoutstat_ops;
+		devinfo->ld_private.data = mirror;

+		devinfo++;
 		i++;
 	}
 	return i;
@@ -2222,47 +2350,27 @@ static int
 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
 {
 	struct nfs4_flexfile_layout *ff_layout;
-	struct nfs4_ff_layout_mirror *mirror;
-	int dev_count = 0;
+	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

-	spin_lock(&args->inode->i_lock);
-	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
-	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
-		if (atomic_read(&mirror->ref) != 0)
-			dev_count ++;
-	}
-	spin_unlock(&args->inode->i_lock);
-	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
-	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
-		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
-			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
-		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
-	}
 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
 	if (!args->devinfo)
 		return -ENOMEM;

 	spin_lock(&args->inode->i_lock);
-	args->num_dev = ff_layout_mirror_prepare_stats(args,
-			&ff_layout->generic_hdr, dev_count);
+	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
+	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
+			&args->devinfo[0], dev_count);
 	spin_unlock(&args->inode->i_lock);
 	if (!args->num_dev) {
 		kfree(args->devinfo);
 		args->devinfo = NULL;
 		return -ENOENT;
 	}

 	return 0;
 }

-static void
-ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
-{
-	struct nfs4_ff_layout_mirror *mirror;
-	int i;
-
-	for (i = 0; i < data->args.num_dev; i++) {
-		mirror = data->args.devinfo[i].layout_private;
-		data->args.devinfo[i].layout_private = NULL;
-		ff_layout_put_mirror(mirror);
-	}
-}
-
 static struct pnfs_layoutdriver_type flexfilelayout_type = {
 	.id			= LAYOUT_FLEX_FILES,
 	.name			= "LAYOUT_FLEX_FILES",
@@ -2284,10 +2392,9 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
 	.read_pagelist		= ff_layout_read_pagelist,
 	.write_pagelist		= ff_layout_write_pagelist,
 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
-	.encode_layoutreturn    = ff_layout_encode_layoutreturn,
+	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
 	.sync			= pnfs_nfs_generic_sync,
 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
-	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
 };

 static int __init nfs4flexfilelayout_init(void)
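The reworked ff_layout_encode_layoutreturn() above illustrates a common XDR trick: the layoutreturn body is variable-length opaque data, so it is first encoded into a scratch page, and only then is the now-known length written ahead of the payload in the real stream. The same idea reduced to plain userspace C (illustrative buffers, not the kernel's xdr_stream API):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <string.h>

	/* Emit an XDR opaque<>: a 4-byte big-endian length word, then the
	 * payload that was pre-encoded into a scratch buffer (the role
	 * played by ff_args->pages[0] above). */
	static size_t xdr_emit_opaque(uint8_t *out, const uint8_t *scratch,
				      size_t len)
	{
		uint32_t belen = htonl((uint32_t)len);

		memcpy(out, &belen, 4);		/* like xdr_reserve_space(xdr, 4) */
		memcpy(out + 4, scratch, len);	/* like xdr_write_pages() */
		return 4 + len;
	}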
@@ -21,6 +21,7 @@

 /* LAYOUTSTATS report interval in ms */
 #define FF_LAYOUTSTATS_REPORT_INTERVAL (60000L)
+#define FF_LAYOUTSTATS_MAXDEV 4

 struct nfs4_ff_ds_version {
 	u32				version;
@@ -73,6 +74,7 @@ struct nfs4_ff_layout_mirror {
 	struct list_head		mirrors;
 	u32				ds_count;
 	u32				efficiency;
+	struct nfs4_deviceid		devid;
 	struct nfs4_ff_layout_ds	*mirror_ds;
 	u32				fh_versions_cnt;
 	struct nfs_fh			*fh_versions;
@@ -81,12 +83,15 @@ struct nfs4_ff_layout_mirror {
 	struct rpc_cred	__rcu		*rw_cred;
 	atomic_t			ref;
 	spinlock_t			lock;
+	unsigned long			flags;
 	struct nfs4_ff_layoutstat	read_stat;
 	struct nfs4_ff_layoutstat	write_stat;
 	ktime_t				start_time;
 	u32				report_interval;
 };

+#define NFS4_FF_MIRROR_STAT_AVAIL	(0)
+
 struct nfs4_ff_layout_segment {
 	struct pnfs_layout_segment	generic_hdr;
 	u64				stripe_unit;
@@ -103,6 +108,14 @@ struct nfs4_flexfile_layout {
 	ktime_t			last_report_time; /* Layoutstat report times */
 };

+struct nfs4_flexfile_layoutreturn_args {
+	struct list_head errors;
+	struct nfs42_layoutstat_devinfo devinfo[FF_LAYOUTSTATS_MAXDEV];
+	unsigned int num_errors;
+	unsigned int num_dev;
+	struct page *pages[1];
+};
+
 static inline struct nfs4_flexfile_layout *
 FF_LAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
 {
@@ -180,9 +193,12 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
 			     struct nfs4_ff_layout_mirror *mirror, u64 offset,
 			     u64 length, int status, enum nfs_opnum4 opnum,
 			     gfp_t gfp_flags);
-int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
-			      struct xdr_stream *xdr, int *count,
-			      const struct pnfs_layout_range *range);
+int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head);
+void ff_layout_free_ds_ioerr(struct list_head *head);
+unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
+		const struct pnfs_layout_range *range,
+		struct list_head *head,
+		unsigned int maxnum);
 struct nfs_fh *
 nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);

@@ -197,7 +213,6 @@ nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg,
 				 struct inode *inode);
 struct rpc_cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
 				       u32 ds_idx, struct rpc_cred *mdscred);
-bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
 bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg);
 bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg);
@@ -20,9 +20,11 @@
 static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
 static unsigned int dataserver_retrans;

+static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
+
 void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
 {
-	if (mirror_ds)
+	if (!IS_ERR_OR_NULL(mirror_ds))
 		nfs4_put_deviceid_node(&mirror_ds->id_node);
 }

@@ -182,12 +184,29 @@ static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
 }

 static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
-				   struct nfs4_ff_layout_mirror *mirror)
+				   struct nfs4_ff_layout_mirror *mirror,
+				   bool create)
 {
-	if (mirror == NULL || mirror->mirror_ds == NULL) {
-		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
-					lseg);
-		return false;
+	if (mirror == NULL || IS_ERR(mirror->mirror_ds))
+		goto outerr;
+	if (mirror->mirror_ds == NULL) {
+		if (create) {
+			struct nfs4_deviceid_node *node;
+			struct pnfs_layout_hdr *lh = lseg->pls_layout;
+			struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);
+
+			node = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
+					&mirror->devid, lh->plh_lc_cred,
+					GFP_KERNEL);
+			if (node)
+				mirror_ds = FF_LAYOUT_MIRROR_DS(node);
+
+			/* check for race with another call to this function */
+			if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
+			    mirror_ds != ERR_PTR(-ENODEV))
+				nfs4_put_deviceid_node(node);
+		} else
+			goto outerr;
 	}
 	if (mirror->mirror_ds->ds == NULL) {
 		struct nfs4_deviceid_node *devid;
@@ -196,15 +215,9 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
 		return false;
 	}
 	return true;
-}
-
-static u64
-end_offset(u64 start, u64 len)
-{
-	u64 end;
-
-	end = start + len;
-	return end >= start ? end : NFS4_MAX_UINT64;
+outerr:
+	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
+	return false;
 }

 static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
@@ -212,8 +225,8 @@ static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
 {
 	u64 end;

-	end = max_t(u64, end_offset(err->offset, err->length),
-		    end_offset(offset, length));
+	end = max_t(u64, pnfs_end_offset(err->offset, err->length),
+		    pnfs_end_offset(offset, length));
 	err->offset = min_t(u64, err->offset, offset);
 	err->length = end - err->offset;
 }
@@ -235,9 +248,9 @@ ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
 	ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
 	if (ret != 0)
 		return ret;
-	if (end_offset(e1->offset, e1->length) < e2->offset)
+	if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
 		return -1;
-	if (e1->offset > end_offset(e2->offset, e2->length))
+	if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
 		return 1;
 	/* If ranges overlap or are contiguous, they are the same */
 	return 0;
@@ -263,8 +276,9 @@ ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
 		}
 		/* Entries match, so merge "err" into "dserr" */
 		extend_ds_error(dserr, err->offset, err->length);
-		list_del(&err->list);
+		list_replace(&err->list, &dserr->list);
 		kfree(err);
 		return;
 	}

 	list_add_tail(&dserr->list, head);
@@ -331,7 +345,7 @@ nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
 	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
 	struct nfs_fh *fh = NULL;

-	if (!ff_layout_mirror_valid(lseg, mirror)) {
+	if (!ff_layout_mirror_valid(lseg, mirror, false)) {
 		pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
 			__func__, mirror_idx);
 		goto out;
@@ -371,7 +385,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 	struct nfs_server *s = NFS_SERVER(ino);
 	unsigned int max_payload;

-	if (!ff_layout_mirror_valid(lseg, mirror)) {
+	if (!ff_layout_mirror_valid(lseg, mirror, true)) {
 		pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
 			__func__, ds_idx);
 		goto out;
@@ -393,8 +407,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 	nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
 			     dataserver_retrans,
 			     mirror->mirror_ds->ds_versions[0].version,
-			     mirror->mirror_ds->ds_versions[0].minor_version,
-			     RPC_AUTH_UNIX);
+			     mirror->mirror_ds->ds_versions[0].minor_version);

 	/* connect success, check rsize/wsize limit */
 	if (ds->ds_clp) {
@@ -457,28 +470,26 @@ nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
 	}
 }

-static bool is_range_intersecting(u64 offset1, u64 length1,
-				  u64 offset2, u64 length2)
+void ff_layout_free_ds_ioerr(struct list_head *head)
 {
-	u64 end1 = end_offset(offset1, length1);
-	u64 end2 = end_offset(offset2, length2);
+	struct nfs4_ff_layout_ds_err *err;

-	return (end1 == NFS4_MAX_UINT64 || end1 > offset2) &&
-	       (end2 == NFS4_MAX_UINT64 || end2 > offset1);
+	while (!list_empty(head)) {
+		err = list_first_entry(head,
+				struct nfs4_ff_layout_ds_err,
+				list);
+		list_del(&err->list);
+		kfree(err);
+	}
 }

 /* called with inode i_lock held */
-int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
-			      struct xdr_stream *xdr, int *count,
-			      const struct pnfs_layout_range *range)
+int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
 {
-	struct nfs4_ff_layout_ds_err *err, *n;
+	struct nfs4_ff_layout_ds_err *err;
 	__be32 *p;

-	list_for_each_entry_safe(err, n, &flo->error_list, list) {
-		if (!is_range_intersecting(err->offset, err->length,
-					   range->offset, range->length))
-			continue;
+	list_for_each_entry(err, head, list) {
 		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
 		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
 		 * + status(4) + opnum(4)
@@ -497,17 +508,59 @@ int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
 				    NFS4_DEVICEID4_SIZE);
 		*p++ = cpu_to_be32(err->status);
 		*p++ = cpu_to_be32(err->opnum);
-		*count += 1;
-		list_del(&err->list);
-		dprintk("%s: offset %llu length %llu status %d op %d count %d\n",
+		dprintk("%s: offset %llu length %llu status %d op %d\n",
 			__func__, err->offset, err->length, err->status,
-			err->opnum, *count);
-		kfree(err);
+			err->opnum);
 	}

 	return 0;
 }

+static
+unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
+				      const struct pnfs_layout_range *range,
+				      struct list_head *head,
+				      unsigned int maxnum)
+{
+	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
+	struct inode *inode = lo->plh_inode;
+	struct nfs4_ff_layout_ds_err *err, *n;
+	unsigned int ret = 0;
+
+	spin_lock(&inode->i_lock);
+	list_for_each_entry_safe(err, n, &flo->error_list, list) {
+		if (!pnfs_is_range_intersecting(err->offset,
+				pnfs_end_offset(err->offset, err->length),
+				range->offset,
+				pnfs_end_offset(range->offset, range->length)))
+			continue;
+		if (!maxnum)
+			break;
+		list_move(&err->list, head);
+		maxnum--;
+		ret++;
+	}
+	spin_unlock(&inode->i_lock);
+	return ret;
+}
+
+unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
+				      const struct pnfs_layout_range *range,
+				      struct list_head *head,
+				      unsigned int maxnum)
+{
+	unsigned int ret;
+
+	ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
+	/* If we're over the max, discard all remaining entries */
+	if (ret == maxnum) {
+		LIST_HEAD(discard);
+		do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
+		ff_layout_free_ds_ioerr(&discard);
+	}
+	return ret;
+}
+
 static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 {
 	struct nfs4_ff_layout_mirror *mirror;
@@ -516,7 +569,11 @@ static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)

 	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
 		mirror = FF_LAYOUT_COMP(lseg, idx);
-		if (mirror && mirror->mirror_ds) {
+		if (mirror) {
+			if (!mirror->mirror_ds)
+				return true;
+			if (IS_ERR(mirror->mirror_ds))
+				continue;
 			devid = &mirror->mirror_ds->id_node;
 			if (!ff_layout_test_devid_unavailable(devid))
 				return true;
@@ -534,8 +591,10 @@ static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)

 	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
 		mirror = FF_LAYOUT_COMP(lseg, idx);
-		if (!mirror || !mirror->mirror_ds)
+		if (!mirror || IS_ERR(mirror->mirror_ds))
 			return false;
+		if (!mirror->mirror_ds)
+			continue;
 		devid = &mirror->mirror_ds->id_node;
 		if (ff_layout_test_devid_unavailable(devid))
 			return false;
@@ -544,7 +603,7 @@ static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
 }

-bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
+static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 {
 	if (lseg->pls_range.iomode == IOMODE_READ)
 		return ff_read_layout_has_available_ds(lseg);
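ff_layout_fetch_ds_ioerr() above bounds how many DS errors a LAYOUTRETURN reports (FF_LAYOUTRETURN_MAXERR), and if the quota fills it drains and frees the remaining entries rather than leaving stale errors queued for a later report. The quota-then-discard shape, reduced to a userspace singly-linked-list sketch with made-up names:

	#include <stdlib.h>

	struct err_entry {
		struct err_entry *next;
	};

	static unsigned int fetch_errors(struct err_entry **src,
					 struct err_entry **dst,
					 unsigned int maxnum)
	{
		unsigned int n = 0;

		while (*src != NULL && n < maxnum) {
			struct err_entry *e = *src;

			*src = e->next;		/* unlink, like list_move() */
			e->next = *dst;
			*dst = e;
			n++;
		}
		return n;
	}

	static unsigned int fetch_and_trim(struct err_entry **src,
					   struct err_entry **dst,
					   unsigned int maxnum)
	{
		unsigned int n = fetch_errors(src, dst, maxnum);

		/* Quota filled: assume overflow and free the rest, mirroring
		 * the "discard all remaining entries" branch above. */
		if (n == maxnum) {
			while (*src != NULL) {
				struct err_entry *e = *src;

				*src = e->next;
				free(e);
			}
		}
		return n;
	}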
@ -634,15 +634,28 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
|
||||
|
||||
static void nfs_request_parent_use_readdirplus(struct dentry *dentry)
|
||||
static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry)
|
||||
{
|
||||
struct dentry *parent;
|
||||
|
||||
if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS))
|
||||
return;
|
||||
parent = dget_parent(dentry);
|
||||
nfs_force_use_readdirplus(d_inode(parent));
|
||||
dput(parent);
|
||||
}
|
||||
|
||||
static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry)
|
||||
{
|
||||
struct dentry *parent;
|
||||
|
||||
if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS))
|
||||
return;
|
||||
parent = dget_parent(dentry);
|
||||
nfs_advise_use_readdirplus(d_inode(parent));
|
||||
dput(parent);
|
||||
}
|
||||
|
||||
static bool nfs_need_revalidate_inode(struct inode *inode)
|
||||
{
|
||||
if (NFS_I(inode)->cache_validity &
|
||||
|
@@ -683,10 +696,10 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
if (need_atime || nfs_need_revalidate_inode(inode)) {
struct nfs_server *server = NFS_SERVER(inode);

if (server->caps & NFS_CAP_READDIRPLUS)
nfs_request_parent_use_readdirplus(dentry);
nfs_readdirplus_parent_cache_miss(dentry);
err = __nfs_revalidate_inode(server, inode);
}
} else
nfs_readdirplus_parent_cache_hit(dentry);
if (!err) {
generic_fillattr(inode, stat);
stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));

@@ -702,8 +715,7 @@ EXPORT_SYMBOL_GPL(nfs_getattr);
static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
{
atomic_set(&l_ctx->count, 1);
l_ctx->lockowner.l_owner = current->files;
l_ctx->lockowner.l_pid = current->tgid;
l_ctx->lockowner = current->files;
INIT_LIST_HEAD(&l_ctx->list);
atomic_set(&l_ctx->io_count, 0);
}

@@ -714,9 +726,7 @@ static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context
struct nfs_lock_context *pos = head;

do {
if (pos->lockowner.l_owner != current->files)
continue;
if (pos->lockowner.l_pid != current->tgid)
if (pos->lockowner != current->files)
continue;
atomic_inc(&pos->count);
return pos;

@@ -799,7 +809,9 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
}
EXPORT_SYMBOL_GPL(nfs_close_context);

struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode)
struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
fmode_t f_mode,
struct file *filp)
{
struct nfs_open_context *ctx;
struct rpc_cred *cred = rpc_lookup_cred();

@@ -818,6 +830,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f
ctx->mode = f_mode;
ctx->flags = 0;
ctx->error = 0;
ctx->flock_owner = (fl_owner_t)filp;
nfs_init_lock_context(&ctx->lock_context);
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);

@@ -942,7 +955,7 @@ int nfs_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;

ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);

@@ -1099,11 +1112,17 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
return 0;
}

static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
bool nfs_mapping_need_revalidate_inode(struct inode *inode)
{
if (nfs_have_delegated_attributes(inode))
return false;
return (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
unsigned long cache_validity = NFS_I(inode)->cache_validity;

if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
const unsigned long force_reval =
NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
return (cache_validity & force_reval) == force_reval;
}

return (cache_validity & NFS_INO_REVAL_PAGECACHE)
|| nfs_attribute_timeout(inode)
|| NFS_STALE(inode);
}

@@ -1317,7 +1336,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
invalid |= NFS_INO_INVALID_ATIME;

if (invalid != 0)
nfs_set_cache_invalid(inode, invalid);
nfs_set_cache_invalid(inode, invalid | NFS_INO_REVAL_FORCED);

nfsi->read_cache_jiffies = fattr->time_start;
return 0;

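The hunks above collapse the two-field nfs_lockowner ({ l_owner, l_pid }) into a single fl_owner_t and record the flock() owner separately on the open context. A minimal user-space sketch of the resulting matching rule; the types here are simplified stand-ins, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    typedef void *fl_owner_t;

    struct lock_context { fl_owner_t lockowner; };

    /* Old model: both l_owner and l_pid had to match.
     * New model: a single opaque owner comparison. */
    static bool match_lock_context(const struct lock_context *l1,
                                   const struct lock_context *l2)
    {
            return l1->lockowner == l2->lockowner;
    }

    int main(void)
    {
            int files_a, files_b;   /* stand-ins for current->files */
            struct lock_context c1 = { &files_a };
            struct lock_context c2 = { &files_a };
            struct lock_context c3 = { &files_b };

            printf("%d %d\n", match_lock_context(&c1, &c2),  /* 1 */
                              match_lock_context(&c1, &c3)); /* 0 */
            return 0;
    }
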
@@ -154,8 +154,7 @@ extern const struct rpc_program nfs_program;
extern void nfs_clients_init(struct net *net);
extern struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *);
int nfs_create_rpc_client(struct nfs_client *, const struct nfs_client_initdata *, rpc_authflavor_t);
struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
rpc_authflavor_t);
struct nfs_client *nfs_get_client(const struct nfs_client_initdata *);
int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
void nfs_server_insert_lists(struct nfs_server *);
void nfs_server_remove_lists(struct nfs_server *);

@@ -194,14 +193,13 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
int ds_addrlen, int ds_proto,
unsigned int ds_timeo,
unsigned int ds_retrans,
u32 minor_version,
rpc_authflavor_t au_flavor);
u32 minor_version);
extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *,
struct inode *);
extern struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo,
unsigned int ds_retrans, rpc_authflavor_t au_flavor);
unsigned int ds_retrans);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);

@@ -346,6 +344,7 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
const struct nfs_client_initdata *);

/* dir.c */
extern void nfs_advise_use_readdirplus(struct inode *dir);
extern void nfs_force_use_readdirplus(struct inode *dir);
extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
struct shrink_control *sc);

@@ -78,8 +78,7 @@ struct nfs_server *nfs3_clone_server(struct nfs_server *source,
*/
struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
rpc_authflavor_t au_flavor)
int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
{
struct rpc_timeout ds_timeout;
struct nfs_client *mds_clp = mds_srv->nfs_client;

@@ -106,7 +105,7 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,

/* Use the MDS nfs_client cl_ipaddr. */
nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
clp = nfs_get_client(&cl_init, au_flavor);
clp = nfs_get_client(&cl_init);

return clp;
}

@@ -397,10 +397,13 @@ static void
nfs42_layoutstat_release(void *calldata)
{
struct nfs42_layoutstat_data *data = calldata;
struct nfs_server *nfss = NFS_SERVER(data->args.inode);
struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
int i;

if (nfss->pnfs_curr_ld->cleanup_layoutstats)
nfss->pnfs_curr_ld->cleanup_layoutstats(data);
for (i = 0; i < data->args.num_dev; i++) {
if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
}

pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
smp_mb__before_atomic();

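The layoutstat cleanup above now goes through a small per-device ops table instead of a layout-driver callback. A user-space model of that pattern; the names are illustrative stand-ins for the kernel's nfs4_xdr_opaque_data:

    #include <stddef.h>

    struct opaque_data;

    struct opaque_ops {
            void (*encode)(struct opaque_data *); /* kernel version also gets xdr + args */
            void (*free)(struct opaque_data *);
    };

    struct opaque_data {
            const struct opaque_ops *ops;
            void *data;
    };

    /* Release pattern used by nfs42_layoutstat_release() above:
     * only call free when a driver actually attached private data. */
    static void opaque_put(struct opaque_data *d)
    {
            if (d->ops && d->ops->free)
                    d->ops->free(d);
    }
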
@@ -181,8 +181,9 @@ static void encode_layoutstats(struct xdr_stream *xdr,
NFS4_DEVICEID4_SIZE);
/* Encode layoutupdate4 */
*p++ = cpu_to_be32(devinfo->layout_type);
if (devinfo->layoutstats_encode != NULL)
devinfo->layoutstats_encode(xdr, args, devinfo);
if (devinfo->ld_private.ops)
devinfo->ld_private.ops->encode(xdr, args,
&devinfo->ld_private);
else
encode_uint32(xdr, 0);
}

@@ -457,7 +457,7 @@ extern void nfs41_handle_server_scope(struct nfs_client *,
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
const struct nfs_lockowner *, nfs4_stateid *,
const struct nfs_lock_context *, nfs4_stateid *,
struct rpc_cred **);

extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);

@@ -464,6 +464,11 @@ static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
}

static bool nfs4_same_verifier(nfs4_verifier *v1, nfs4_verifier *v2)
{
return memcmp(v1->data, v2->data, sizeof(v1->data)) == 0;
}

/**
* nfs40_walk_client_list - Find server that recognizes a client ID
*

@@ -521,7 +526,21 @@ int nfs40_walk_client_list(struct nfs_client *new,

if (!nfs4_match_client_owner_id(pos, new))
continue;

/*
* We just sent a new SETCLIENTID, which should have
* caused the server to return a new cl_confirm. So if
* cl_confirm is the same, then this is a different
* server that just returned the same cl_confirm by
* coincidence:
*/
if ((new != pos) && nfs4_same_verifier(&pos->cl_confirm,
&new->cl_confirm))
continue;
/*
* But if the cl_confirm's are different, then the only
* way that a SETCLIENTID_CONFIRM to pos can succeed is
* if new and pos point to the same server:
*/
atomic_inc(&pos->cl_count);
spin_unlock(&nn->nfs_client_lock);

@@ -534,6 +553,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
break;
case 0:
nfs4_swap_callback_idents(pos, new);
pos->cl_confirm = new->cl_confirm;

prev = NULL;
*result = pos;

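The cl_confirm comparison added above is what eliminates the false-positive NFSv4.0 trunking detection. A compilable sketch of the decision, with simplified types and an invented helper name (the kernel only exposes nfs4_same_verifier):

    #include <stdbool.h>
    #include <string.h>

    typedef struct { unsigned char data[8]; } nfs4_verifier;

    static bool same_verifier(const nfs4_verifier *v1, const nfs4_verifier *v2)
    {
            return memcmp(v1->data, v2->data, sizeof(v1->data)) == 0;
    }

    /* Two clients with the same owner id can be trunked paths to one
     * server only if the server handed back a fresh cl_confirm for the
     * new SETCLIENTID; an identical cl_confirm on a distinct client is
     * a coincidence and must not be treated as trunking. */
    static bool may_be_trunked(const nfs4_verifier *pos_confirm,
                               const nfs4_verifier *new_confirm,
                               bool same_client)
    {
            if (!same_client && same_verifier(pos_confirm, new_confirm))
                    return false; /* false positive: skip this candidate */
            return true;          /* confirm via SETCLIENTID_CONFIRM to pos */
    }
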
@@ -881,7 +901,6 @@ static int nfs4_set_client(struct nfs_server *server,
const struct sockaddr *addr,
const size_t addrlen,
const char *ip_addr,
rpc_authflavor_t authflavour,
int proto, const struct rpc_timeout *timeparms,
u32 minorversion, struct net *net)
{

@@ -907,7 +926,7 @@ static int nfs4_set_client(struct nfs_server *server,
set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);

/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init, authflavour);
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp)) {
error = PTR_ERR(clp);
goto error;

@@ -948,7 +967,7 @@ error:
struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
const struct sockaddr *ds_addr, int ds_addrlen,
int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
u32 minor_version, rpc_authflavor_t au_flavor)
u32 minor_version)
{
struct rpc_timeout ds_timeout;
struct nfs_client *mds_clp = mds_srv->nfs_client;

@@ -979,7 +998,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
* (section 13.1 RFC 5661).
*/
nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
clp = nfs_get_client(&cl_init, au_flavor);
clp = nfs_get_client(&cl_init);

dprintk("<-- %s %p\n", __func__, clp);
return clp;

@@ -1103,7 +1122,6 @@ static int nfs4_init_server(struct nfs_server *server,
(const struct sockaddr *)&data->nfs_server.address,
data->nfs_server.addrlen,
data->client_address,
data->selected_flavor,
data->nfs_server.protocol,
&timeparms,
data->minorversion,

@@ -1200,7 +1218,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
data->addr,
data->addrlen,
parent_client->cl_ipaddr,
data->authflavor,
rpc_protocol(parent_server->client),
parent_server->client->cl_timeout,
parent_client->cl_mvops->minor_version,

@@ -1311,7 +1328,6 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,

nfs_server_remove_lists(server);
error = nfs4_set_client(server, hostname, sap, salen, buf,
clp->cl_rpcclient->cl_auth->au_flavor,
clp->cl_proto, clnt->cl_timeout,
clp->cl_minorversion, net);
nfs_put_client(clp);

@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
parent = dget_parent(dentry);
dir = d_inode(parent);

ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;

@@ -94,7 +94,7 @@ static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fa
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state, struct nfs4_label *ilabel,
struct nfs_open_context *ctx, struct nfs4_label *ilabel,
struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,

@@ -226,7 +226,6 @@ static const u32 nfs4_pnfs_open_bitmap[3] = {

static const u32 nfs4_open_noattr_bitmap[3] = {
FATTR4_WORD0_TYPE
| FATTR4_WORD0_CHANGE
| FATTR4_WORD0_FILEID,
};

@@ -817,6 +816,10 @@ static int nfs41_sequence_process(struct rpc_task *task,
case -NFS4ERR_SEQ_FALSE_RETRY:
++slot->seq_nr;
goto retry_nowait;
case -NFS4ERR_DEADSESSION:
case -NFS4ERR_BADSESSION:
nfs4_schedule_session_recovery(session, res->sr_status);
goto retry_nowait;
default:
/* Just update the slot sequence no. */
slot->seq_done = 1;

@@ -1221,6 +1224,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
atomic_inc(&sp->so_count);
p->o_arg.open_flags = flags;
p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
p->o_arg.umask = current_umask();
p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
p->o_arg.share_access = nfs4_map_atomic_open_share(server,
fmode, flags);
/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS

@@ -1228,8 +1233,16 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
if (!(flags & O_EXCL)) {
/* ask server to check for all possible rights as results
* are cached */
p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
switch (p->o_arg.claim) {
default:
break;
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_FH:
p->o_arg.access = NFS4_ACCESS_READ |
NFS4_ACCESS_MODIFY |
NFS4_ACCESS_EXTEND |
NFS4_ACCESS_EXECUTE;
}
}
p->o_arg.clientid = server->nfs_client->cl_clientid;
p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);

@@ -1239,7 +1252,6 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
p->o_arg.bitmask = nfs4_bitmask(server, label);
p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
p->o_arg.label = nfs4_label_copy(p->a_label, label);
p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
switch (p->o_arg.claim) {
case NFS4_OPEN_CLAIM_NULL:
case NFS4_OPEN_CLAIM_DELEGATE_CUR:

@@ -2819,7 +2831,7 @@ static int _nfs4_do_open(struct inode *dir,
nfs_fattr_init(opendata->o_res.f_attr);
status = nfs4_do_setattr(state->inode, cred,
opendata->o_res.f_attr, sattr,
state, label, olabel);
ctx, label, olabel);
if (status == 0) {
nfs_setattr_update_inode(state->inode, sattr,
opendata->o_res.f_attr);

@@ -2914,7 +2926,7 @@ static int _nfs4_do_setattr(struct inode *inode,
struct nfs_setattrargs *arg,
struct nfs_setattrres *res,
struct rpc_cred *cred,
struct nfs4_state *state)
struct nfs_open_context *ctx)
{
struct nfs_server *server = NFS_SERVER(inode);
struct rpc_message msg = {

@@ -2937,15 +2949,17 @@ static int _nfs4_do_setattr(struct inode *inode,

if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) {
/* Use that stateid */
} else if (truncate && state != NULL) {
struct nfs_lockowner lockowner = {
.l_owner = current->files,
.l_pid = current->tgid,
};
if (!nfs4_valid_open_stateid(state))
} else if (truncate && ctx != NULL) {
struct nfs_lock_context *l_ctx;
if (!nfs4_valid_open_stateid(ctx->state))
return -EBADF;
if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner,
&arg->stateid, &delegation_cred) == -EIO)
l_ctx = nfs_get_lock_context(ctx);
if (IS_ERR(l_ctx))
return PTR_ERR(l_ctx);
status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
&arg->stateid, &delegation_cred);
nfs_put_lock_context(l_ctx);
if (status == -EIO)
return -EBADF;
} else
nfs4_stateid_copy(&arg->stateid, &zero_stateid);

@@ -2955,7 +2969,7 @@ static int _nfs4_do_setattr(struct inode *inode,
status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);

put_rpccred(delegation_cred);
if (status == 0 && state != NULL)
if (status == 0 && ctx != NULL)
renew_lease(server, timestamp);
trace_nfs4_setattr(inode, &arg->stateid, status);
return status;

@@ -2963,10 +2977,11 @@ static int _nfs4_do_setattr(struct inode *inode,

static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
struct nfs4_state *state, struct nfs4_label *ilabel,
struct nfs_open_context *ctx, struct nfs4_label *ilabel,
struct nfs4_label *olabel)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_state *state = ctx ? ctx->state : NULL;
struct nfs_setattrargs arg = {
.fh = NFS_FH(inode),
.iap = sattr,

@@ -2991,7 +3006,7 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
arg.bitmask = nfs4_bitmask(server, olabel);

do {
err = _nfs4_do_setattr(inode, &arg, &res, cred, state);
err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
switch (err) {
case -NFS4ERR_OPENMODE:
if (!(sattr->ia_valid & ATTR_SIZE)) {

@@ -3028,10 +3043,15 @@ struct nfs4_closedata {
struct nfs4_state *state;
struct nfs_closeargs arg;
struct nfs_closeres res;
struct {
struct nfs4_layoutreturn_args arg;
struct nfs4_layoutreturn_res res;
struct nfs4_xdr_opaque_data ld_private;
u32 roc_barrier;
bool roc;
} lr;
struct nfs_fattr fattr;
unsigned long timestamp;
bool roc;
u32 roc_barrier;
};

static void nfs4_free_closedata(void *data)

@@ -3040,8 +3060,9 @@ static void nfs4_free_closedata(void *data)
struct nfs4_state_owner *sp = calldata->state->owner;
struct super_block *sb = calldata->state->inode->i_sb;

if (calldata->roc)
pnfs_roc_release(calldata->state->inode);
if (calldata->lr.roc)
pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
calldata->res.lr_ret);
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_state_owner(sp);

@@ -3060,15 +3081,38 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);

/* Handle Layoutreturn errors */
if (calldata->arg.lr_args && task->tk_status != 0) {
switch (calldata->res.lr_ret) {
default:
calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
break;
case 0:
calldata->arg.lr_args = NULL;
calldata->res.lr_res = NULL;
break;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
case -NFS4ERR_WRONG_CRED:
calldata->arg.lr_args = NULL;
calldata->res.lr_res = NULL;
calldata->res.lr_ret = 0;
rpc_restart_call_prepare(task);
return;
}
}

/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
*/
switch (task->tk_status) {
case 0:
res_stateid = &calldata->res.stateid;
if (calldata->roc)
pnfs_roc_set_barrier(state->inode,
calldata->roc_barrier);
renew_lease(server, calldata->timestamp);
break;
case -NFS4ERR_ADMIN_REVOKED:

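The same piggybacked-LAYOUTRETURN error triage appears again below in nfs4_delegreturn_done(). A standalone sketch of the shared policy; the helper name and the locally defined RFC 5661 error values are illustrative, not part of the patch:

    /* Error values from RFC 5661, defined locally so the sketch
     * compiles outside the kernel tree. */
    #define NFS4ERR_EXPIRED            10011
    #define NFS4ERR_OLD_STATEID        10024
    #define NFS4ERR_BAD_STATEID        10025
    #define NFS4ERR_ADMIN_REVOKED      10047
    #define NFS4ERR_NOMATCHING_LAYOUT  10060
    #define NFS4ERR_UNKNOWN_LAYOUTTYPE 10062
    #define NFS4ERR_WRONG_CRED         10082
    #define NFS4ERR_DELEG_REVOKED      10087

    enum lr_action { LR_KEEP, LR_DROP, LR_RESTART_WITHOUT_LR };

    static enum lr_action layoutreturn_triage(int lr_ret)
    {
            switch (lr_ret) {
            case 0:
                    return LR_DROP; /* op succeeded; don't resend it */
            case -NFS4ERR_ADMIN_REVOKED:
            case -NFS4ERR_DELEG_REVOKED:
            case -NFS4ERR_EXPIRED:
            case -NFS4ERR_BAD_STATEID:
            case -NFS4ERR_OLD_STATEID:
            case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
            case -NFS4ERR_WRONG_CRED:
                    /* retry the compound minus the LAYOUTRETURN op */
                    return LR_RESTART_WITHOUT_LR;
            default:
                    return LR_KEEP; /* recorded as NOMATCHING_LAYOUT */
            }
    }
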
@@ -3144,15 +3188,20 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
goto out_no_action;
}

if (nfs4_wait_on_layoutreturn(inode, task)) {
if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
nfs_release_seqid(calldata->arg.seqid);
goto out_wait;
}

if (calldata->arg.fmode == 0)
if (calldata->arg.fmode == 0) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
if (calldata->roc)
pnfs_roc_get_barrier(inode, &calldata->roc_barrier);

/* Close-to-open cache consistency revalidation */
if (!nfs4_have_delegation(inode, FMODE_READ))
calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
else
calldata->arg.bitmask = NULL;
}

calldata->arg.share_access =
nfs4_map_atomic_open_share(NFS_SERVER(inode),

@@ -3179,13 +3228,6 @@ static const struct rpc_call_ops nfs4_close_ops = {
.rpc_release = nfs4_free_closedata,
};

static bool nfs4_roc(struct inode *inode)
{
if (!nfs_have_layout(inode))
return false;
return pnfs_roc(inode);
}

/*
* It is possible for data to be read/written from a mem-mapped file
* after the sys_close call (which hits the vfs layer as a flush).

@@ -3233,11 +3275,17 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
if (IS_ERR(calldata->arg.seqid))
goto out_free_calldata;
calldata->arg.fmode = 0;
calldata->arg.bitmask = server->cache_consistency_bitmask;
calldata->lr.arg.ld_private = &calldata->lr.ld_private;
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->roc = nfs4_roc(state->inode);
calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
calldata->lr.roc = pnfs_roc(state->inode,
&calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
if (calldata->lr.roc) {
calldata->arg.lr_args = &calldata->lr.arg;
calldata->res.lr_res = &calldata->lr.res;
}
nfs_sb_active(calldata->inode->i_sb);

msg.rpc_argp = &calldata->arg;

@@ -3290,7 +3338,7 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)

#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL)

static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{

@@ -3687,7 +3735,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
{
struct inode *inode = d_inode(dentry);
struct rpc_cred *cred = NULL;
struct nfs4_state *state = NULL;
struct nfs_open_context *ctx = NULL;
struct nfs4_label *label = NULL;
int status;

@@ -3708,20 +3756,17 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,

/* Search for an existing open(O_WRITE) file */
if (sattr->ia_valid & ATTR_FILE) {
struct nfs_open_context *ctx;

ctx = nfs_file_open_context(sattr->ia_file);
if (ctx) {
if (ctx)
cred = ctx->cred;
state = ctx->state;
}
}

label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
if (IS_ERR(label))
return PTR_ERR(label);

status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
if (status == 0) {
nfs_setattr_update_inode(inode, sattr, fattr);
nfs_setsecurity(inode, fattr, label);

@@ -3966,18 +4011,20 @@ static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
int flags)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_label l, *ilabel = NULL;
struct nfs_open_context *ctx;
struct nfs4_state *state;
int status = 0;

ctx = alloc_nfs_open_context(dentry, FMODE_READ);
ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
if (IS_ERR(ctx))
return PTR_ERR(ctx);

ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);

sattr->ia_mode &= ~current_umask();
if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
sattr->ia_mode &= ~current_umask();
state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
if (IS_ERR(state)) {
status = PTR_ERR(state);

@@ -4185,6 +4232,7 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
data->arg.attrs = sattr;
data->arg.ftype = ftype;
data->arg.bitmask = nfs4_bitmask(server, data->label);
data->arg.umask = current_umask();
data->res.server = server;
data->res.fh = &data->fh;
data->res.fattr = &data->fattr;

@@ -4282,13 +4330,15 @@ out:
static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
struct iattr *sattr)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_exception exception = { };
struct nfs4_label l, *label = NULL;
int err;

label = nfs4_label_init_security(dir, dentry, sattr, &l);

sattr->ia_mode &= ~current_umask();
if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
sattr->ia_mode &= ~current_umask();
do {
err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
trace_nfs4_mkdir(dir, &dentry->d_name, err);

@@ -4391,13 +4441,15 @@ out:
static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
struct iattr *sattr, dev_t rdev)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_exception exception = { };
struct nfs4_label l, *label = NULL;
int err;

label = nfs4_label_init_security(dir, dentry, sattr, &l);

sattr->ia_mode &= ~current_umask();
if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
sattr->ia_mode &= ~current_umask();
do {
err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
trace_nfs4_mknod(dir, &dentry->d_name, err);

@@ -4541,11 +4593,7 @@ int nfs4_set_rw_stateid(nfs4_stateid *stateid,
const struct nfs_lock_context *l_ctx,
fmode_t fmode)
{
const struct nfs_lockowner *lockowner = NULL;

if (l_ctx != NULL)
lockowner = &l_ctx->lockowner;
return nfs4_select_rw_stateid(ctx->state, fmode, lockowner, stateid, NULL);
return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
}
EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);

@@ -5564,11 +5612,16 @@ struct nfs4_delegreturndata {
struct nfs_fh fh;
nfs4_stateid stateid;
unsigned long timestamp;
struct {
struct nfs4_layoutreturn_args arg;
struct nfs4_layoutreturn_res res;
struct nfs4_xdr_opaque_data ld_private;
u32 roc_barrier;
bool roc;
} lr;
struct nfs_fattr fattr;
int rpc_status;
struct inode *inode;
bool roc;
u32 roc_barrier;
};

static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)

@@ -5579,6 +5632,32 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
return;

trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);

/* Handle Layoutreturn errors */
if (data->args.lr_args && task->tk_status != 0) {
switch (data->res.lr_ret) {
default:
data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
break;
case 0:
data->args.lr_args = NULL;
data->res.lr_res = NULL;
break;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
case -NFS4ERR_WRONG_CRED:
data->args.lr_args = NULL;
data->res.lr_res = NULL;
data->res.lr_ret = 0;
rpc_restart_call_prepare(task);
return;
}
}

switch (task->tk_status) {
case 0:
renew_lease(data->res.server, data->timestamp);

@@ -5602,8 +5681,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
}
}
data->rpc_status = task->tk_status;
if (data->roc && data->rpc_status == 0)
pnfs_roc_set_barrier(data->inode, data->roc_barrier);
}

static void nfs4_delegreturn_release(void *calldata)

@@ -5612,8 +5689,9 @@ static void nfs4_delegreturn_release(void *calldata)
struct inode *inode = data->inode;

if (inode) {
if (data->roc)
pnfs_roc_release(inode);
if (data->lr.roc)
pnfs_roc_release(&data->lr.arg, &data->lr.res,
data->res.lr_ret);
nfs_iput_and_deactive(inode);
}
kfree(calldata);

@@ -5625,12 +5703,9 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)

d_data = (struct nfs4_delegreturndata *)data;

if (nfs4_wait_on_layoutreturn(d_data->inode, task))
if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
return;

if (d_data->roc)
pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);

nfs4_setup_sequence(d_data->res.server,
&d_data->args.seq_args,
&d_data->res.seq_res,

@@ -5676,12 +5751,22 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
nfs4_stateid_copy(&data->stateid, stateid);
data->res.fattr = &data->fattr;
data->res.server = server;
data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
data->lr.arg.ld_private = &data->lr.ld_private;
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
data->inode = nfs_igrab_and_active(inode);
if (data->inode)
data->roc = nfs4_roc(inode);
if (data->inode) {
if (data->lr.roc) {
data->args.lr_args = &data->lr.arg;
data->res.lr_res = &data->lr.res;
}
} else if (data->lr.roc) {
pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
data->lr.roc = false;
}

task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;

@@ -8559,21 +8644,13 @@ static void nfs4_layoutreturn_release(void *calldata)
{
struct nfs4_layoutreturn *lrp = calldata;
struct pnfs_layout_hdr *lo = lrp->args.layout;
LIST_HEAD(freeme);

dprintk("--> %s\n", __func__);
spin_lock(&lo->plh_inode->i_lock);
if (lrp->res.lrs_present) {
pnfs_mark_matching_lsegs_invalid(lo, &freeme,
&lrp->args.range,
be32_to_cpu(lrp->args.stateid.seqid));
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
} else
pnfs_mark_layout_stateid_invalid(lo, &freeme);
pnfs_clear_layoutreturn_waitbit(lo);
spin_unlock(&lo->plh_inode->i_lock);
pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
lrp->res.lrs_present ? &lrp->res.stateid : NULL);
nfs4_sequence_free_slot(&lrp->res.seq_res);
pnfs_free_lseg_list(&freeme);
if (lrp->ld_private.ops && lrp->ld_private.ops->free)
lrp->ld_private.ops->free(&lrp->ld_private);
pnfs_put_layout_hdr(lrp->args.layout);
nfs_iput_and_deactive(lrp->inode);
kfree(calldata);

@@ -169,7 +169,7 @@ bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
if (slotid <= tbl->max_slotid)
return nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
return nfs4_find_or_create_slot(tbl, slotid, 0, GFP_NOWAIT);
return ERR_PTR(-E2BIG);
}

@@ -800,11 +800,13 @@ void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
* that is compatible with current->files
*/
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
__nfs4_find_lock_state(struct nfs4_state *state,
fl_owner_t fl_owner, fl_owner_t fl_owner2)
{
struct nfs4_lock_state *pos;
list_for_each_entry(pos, &state->lock_states, ls_locks) {
if (pos->ls_owner != fl_owner)
if (pos->ls_owner != fl_owner &&
pos->ls_owner != fl_owner2)
continue;
atomic_inc(&pos->ls_count);
return pos;

@@ -857,7 +859,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_

for(;;) {
spin_lock(&state->state_lock);
lsp = __nfs4_find_lock_state(state, owner);
lsp = __nfs4_find_lock_state(state, owner, 0);
if (lsp != NULL)
break;
if (new != NULL) {

@@ -939,22 +941,23 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)

static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
struct nfs4_state *state,
const struct nfs_lockowner *lockowner)
const struct nfs_lock_context *l_ctx)
{
struct nfs4_lock_state *lsp;
fl_owner_t fl_owner;
fl_owner_t fl_owner, fl_flock_owner;
int ret = -ENOENT;

if (lockowner == NULL)
if (l_ctx == NULL)
goto out;

if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
goto out;

fl_owner = lockowner->l_owner;
fl_owner = l_ctx->lockowner;
fl_flock_owner = l_ctx->open_context->flock_owner;

spin_lock(&state->state_lock);
lsp = __nfs4_find_lock_state(state, fl_owner);
lsp = __nfs4_find_lock_state(state, fl_owner, fl_flock_owner);
if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
ret = -EIO;
else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {

@@ -986,7 +989,7 @@ static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
* requests.
*/
int nfs4_select_rw_stateid(struct nfs4_state *state,
fmode_t fmode, const struct nfs_lockowner *lockowner,
fmode_t fmode, const struct nfs_lock_context *l_ctx,
nfs4_stateid *dst, struct rpc_cred **cred)
{
int ret;

@@ -995,7 +998,7 @@ int nfs4_select_rw_stateid(struct nfs4_state *state,
return -EIO;
if (cred != NULL)
*cred = NULL;
ret = nfs4_copy_lock_stateid(dst, state, lockowner);
ret = nfs4_copy_lock_stateid(dst, state, l_ctx);
if (ret == -EIO)
/* A lost lock - don't even consider delegations */
goto out;

@@ -2190,7 +2193,7 @@ void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
}
nfs4_schedule_lease_recovery(clp);
nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);

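Taken together with the inode.c changes, the two-owner lookup above is what makes flock() stateids work: POSIX locks are owned by the opener's files pointer, while flock() locks are owned by the struct file recorded on the open context. A user-space model with simplified types:

    typedef void *fl_owner_t;

    struct open_ctx { fl_owner_t flock_owner; };  /* set to filp at open */
    struct lock_ctx { fl_owner_t lockowner; struct open_ctx *open; };
    struct lock_state { fl_owner_t ls_owner; };

    /* A lock state matches a lock context on either owner, so reads
     * and writes pick up the right stateid for both lock styles. */
    static int owner_matches(const struct lock_state *ls,
                             const struct lock_ctx *l)
    {
            return ls->ls_owner == l->lockowner ||       /* POSIX owner */
                   ls->ls_owner == l->open->flock_owner; /* flock owner */
    }
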
@@ -52,6 +52,7 @@
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/fs_struct.h>

#include "nfs4_fs.h"
#include "internal.h"

@@ -415,6 +416,8 @@ static int nfs4_stat_to_errno(int);
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
#define encode_layoutreturn_maxsz 0
#define decode_layoutreturn_maxsz 0
#endif /* CONFIG_NFS_V4_1 */

#define NFS4_enc_compound_sz (1024) /* XXX: large enough? */

@@ -499,22 +502,22 @@ static int nfs4_stat_to_errno(int);
(compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_downgrade_maxsz + \
encode_getattr_maxsz)
encode_open_downgrade_maxsz)
#define NFS4_dec_open_downgrade_sz \
(compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_downgrade_maxsz + \
decode_getattr_maxsz)
decode_open_downgrade_maxsz)
#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_layoutreturn_maxsz + \
encode_close_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_layoutreturn_maxsz + \
decode_close_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \

@@ -708,10 +711,13 @@ static int nfs4_stat_to_errno(int);
#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_layoutreturn_maxsz + \
encode_delegreturn_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_layoutreturn_maxsz + \
decode_delegreturn_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \

@@ -1003,7 +1009,7 @@ static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *ve
static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
const struct nfs4_label *label,
const struct nfs_server *server,
bool excl_check)
bool excl_check, const umode_t *umask)
{
char owner_name[IDMAP_NAMESZ];
char owner_group[IDMAP_NAMESZ];

@@ -1017,18 +1023,21 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,

/*
* We reserve enough space to write the entire attribute buffer at once.
* In the worst-case, this would be
* 16(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
* = 40 bytes, plus any contribution from variable-length fields
* such as owner/group.
*/
if (iap->ia_valid & ATTR_SIZE) {
bmval[0] |= FATTR4_WORD0_SIZE;
len += 8;
}
if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
umask = NULL;
if (iap->ia_valid & ATTR_MODE) {
bmval[1] |= FATTR4_WORD1_MODE;
len += 4;
if (umask) {
bmval[2] |= FATTR4_WORD2_MODE_UMASK;
len += 8;
} else {
bmval[1] |= FATTR4_WORD1_MODE;
len += 4;
}
}
if (iap->ia_valid & ATTR_UID) {
owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ);

@@ -1129,6 +1138,10 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
*p++ = cpu_to_be32(label->len);
p = xdr_encode_opaque_fixed(p, label->label, label->len);
}
if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
*p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
*p++ = cpu_to_be32(*umask);
}

/* out: */
}

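On the wire, mode_umask is a pair of 32-bit words (the mode masked to its permission bits, then the umask) where the plain mode attribute is a single word, hence the len += 8 versus len += 4 above. A self-contained sketch of just that encoding; the byte-swap helper stands in for the kernel's xdr_stream:

    #include <stdint.h>
    #include <arpa/inet.h>

    #define S_IALLUGO 07777u  /* permission bits, as in the kernel */

    static uint32_t *encode_mode_umask(uint32_t *p, uint32_t ia_mode,
                                       uint32_t umask)
    {
            *p++ = htonl(ia_mode & S_IALLUGO); /* mode4: requested mode */
            *p++ = htonl(umask);               /* umask to apply server-side */
            return p;
    }

Letting the server apply the umask closes the race where a client-side `mode & ~umask` fights with inherited default ACLs on the directory.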
@@ -1183,7 +1196,8 @@ static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *
}

encode_string(xdr, create->name->len, create->name->name);
encode_attrs(xdr, create->attrs, create->label, create->server, false);
encode_attrs(xdr, create->attrs, create->label, create->server, false,
&create->umask);
}

static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr)

@@ -1403,11 +1417,13 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
switch(arg->createmode) {
case NFS4_CREATE_UNCHECKED:
*p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false);
encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false,
&arg->umask);
break;
case NFS4_CREATE_GUARDED:
*p = cpu_to_be32(NFS4_CREATE_GUARDED);
encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false);
encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false,
&arg->umask);
break;
case NFS4_CREATE_EXCLUSIVE:
*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);

@@ -1416,7 +1432,8 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
case NFS4_CREATE_EXCLUSIVE4_1:
*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
encode_nfs4_verifier(xdr, &arg->u.verifier);
encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, true);
encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, true,
&arg->umask);
}
}

@@ -1672,7 +1689,7 @@ static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs
{
encode_op_hdr(xdr, OP_SETATTR, decode_setattr_maxsz, hdr);
encode_nfs4_stateid(xdr, &arg->stateid);
encode_attrs(xdr, arg->iap, arg->label, server, false);
encode_attrs(xdr, arg->iap, arg->label, server, false, NULL);
}

static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr)

@@ -2015,6 +2032,7 @@ encode_layoutreturn(struct xdr_stream *xdr,
const struct nfs4_layoutreturn_args *args,
struct compound_hdr *hdr)
{
const struct pnfs_layoutdriver_type *lr_ops = NFS_SERVER(args->inode)->pnfs_curr_ld;
__be32 *p;

encode_op_hdr(xdr, OP_LAYOUTRETURN, decode_layoutreturn_maxsz, hdr);

@@ -2029,10 +2047,11 @@ encode_layoutreturn(struct xdr_stream *xdr,
spin_lock(&args->inode->i_lock);
encode_nfs4_stateid(xdr, &args->stateid);
spin_unlock(&args->inode->i_lock);
if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) {
NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn(
NFS_I(args->inode)->layout, xdr, args);
} else
if (args->ld_private->ops && args->ld_private->ops->encode)
args->ld_private->ops->encode(xdr, args, args->ld_private);
else if (lr_ops->encode_layoutreturn)
lr_ops->encode_layoutreturn(xdr, args);
else
encode_uint32(xdr, 0);
}

@@ -2062,6 +2081,13 @@ static void encode_free_stateid(struct xdr_stream *xdr,
encode_op_hdr(xdr, OP_FREE_STATEID, decode_free_stateid_maxsz, hdr);
encode_nfs4_stateid(xdr, &args->stateid);
}
#else
static inline void
encode_layoutreturn(struct xdr_stream *xdr,
const struct nfs4_layoutreturn_args *args,
struct compound_hdr *hdr)
{
}
#endif /* CONFIG_NFS_V4_1 */

/*

@@ -2249,8 +2275,11 @@ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
if (args->lr_args)
encode_layoutreturn(xdr, args->lr_args, &hdr);
encode_close(xdr, args, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
if (args->bitmask != NULL)
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}

@@ -2328,7 +2357,6 @@ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_open_downgrade(xdr, args, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}

@@ -2671,6 +2699,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fhandle, &hdr);
if (args->lr_args)
encode_layoutreturn(xdr, args->lr_args, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_delegreturn(xdr, args->stateid, &hdr);
encode_nops(&hdr);

@@ -6089,6 +6119,13 @@ static int decode_free_stateid(struct xdr_stream *xdr,
res->status = decode_op_hdr(xdr, OP_FREE_STATEID);
return res->status;
}
#else
static inline
int decode_layoutreturn(struct xdr_stream *xdr,
struct nfs4_layoutreturn_res *res)
{
return 0;
}
#endif /* CONFIG_NFS_V4_1 */

/*

@@ -6115,9 +6152,6 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
if (status)
goto out;
status = decode_open_downgrade(xdr, res);
if (status != 0)
goto out;
decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}

@@ -6444,6 +6478,12 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
if (res->lr_res) {
status = decode_layoutreturn(xdr, res->lr_res);
res->lr_ret = status;
if (status)
goto out;
}
status = decode_close(xdr, res);
if (status != 0)
goto out;

@@ -6920,6 +6960,12 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
status = decode_putfh(xdr);
if (status != 0)
goto out;
if (res->lr_res) {
status = decode_layoutreturn(xdr, res->lr_res);
res->lr_ret = status;
if (status)
goto out;
}
status = decode_getfattr(xdr, res->fattr, res->server);
if (status != 0)
goto out;

@@ -504,10 +504,10 @@ encode_accumulated_error(struct objlayout *objlay, __be32 *p)
}

void
objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay,
struct xdr_stream *xdr,
objlayout_encode_layoutreturn(struct xdr_stream *xdr,
const struct nfs4_layoutreturn_args *args)
{
struct pnfs_layout_hdr *pnfslay = args->layout;
struct objlayout *objlay = OBJLAYOUT(pnfslay);
struct objlayout_io_res *oir, *tmp;
__be32 *start;

@@ -175,7 +175,6 @@ extern void objlayout_encode_layoutcommit(
const struct nfs4_layoutcommit_args *);

extern void objlayout_encode_layoutreturn(
struct pnfs_layout_hdr *,
struct xdr_stream *,
const struct nfs4_layoutreturn_args *);

@@ -867,8 +867,7 @@ static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
const struct nfs_lock_context *l2)
{
return l1->lockowner.l_owner == l2->lockowner.l_owner
&& l1->lockowner.l_pid == l2->lockowner.l_pid;
return l1->lockowner == l2->lockowner;
}

/**

fs/nfs/pnfs.c

@@ -54,6 +54,12 @@ static DEFINE_SPINLOCK(pnfs_spinlock);
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
struct list_head *free_me,
const struct pnfs_layout_range *range,
u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
struct list_head *tmp_list);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *

@@ -299,6 +305,49 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
}
}

static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
u32 seq)
{
if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
iomode = IOMODE_ANY;
lo->plh_return_iomode = iomode;
set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
if (seq != 0) {
WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
lo->plh_return_seq = seq;
}
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
lo->plh_return_iomode = 0;
lo->plh_return_seq = 0;
clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
}

static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
smp_mb__after_atomic();
wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
struct list_head *free_me)
{
clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
pnfs_lseg_dec_and_remove_zero(lseg, free_me);
if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}

/*
* Mark a pnfs_layout_hdr and all associated layout segments as invalid
*

@@ -315,9 +364,17 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
.offset = 0,
.length = NFS4_MAX_UINT64,
};
struct pnfs_layout_segment *lseg, *next;

set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range, 0);
pnfs_clear_layoutreturn_info(lo);
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
pnfs_clear_lseg_state(lseg, lseg_list);
pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
!test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
pnfs_clear_layoutreturn_waitbit(lo);
return !list_empty(&lo->plh_segs);
}

static int

@@ -396,27 +453,42 @@ pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
struct inode *ino = lseg->pls_layout->plh_inode;

NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
if (lseg != NULL) {
struct inode *inode = lseg->pls_layout->plh_inode;
NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
}
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
struct pnfs_layout_segment *lseg)
{
struct inode *inode = lo->plh_inode;

WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
list_del_init(&lseg->pls_list);
/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
atomic_dec(&lo->plh_refcount);
if (list_empty(&lo->plh_segs)) {
if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
return;
if (list_empty(&lo->plh_segs) &&
!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
!test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
if (atomic_read(&lo->plh_outstanding) == 0)
set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}
rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
struct pnfs_layout_segment *lseg)
{
if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
pnfs_layout_is_valid(lo)) {
pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
return true;
}
return false;
}

void

@@ -442,6 +514,8 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
}
pnfs_get_layout_hdr(lo);
pnfs_layout_remove_lseg(lo, lseg);
if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
lseg = NULL;
spin_unlock(&inode->i_lock);
pnfs_free_lseg(lseg);
pnfs_put_layout_hdr(lo);

@@ -482,22 +556,15 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
struct pnfs_layout_hdr *lo = lseg->pls_layout;
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
return;
pnfs_get_layout_hdr(lo);
pnfs_layout_remove_lseg(lo, lseg);
pnfs_free_lseg_async(lseg);
if (!pnfs_cache_lseg_for_layoutreturn(lo, lseg)) {
pnfs_get_layout_hdr(lo);
pnfs_free_lseg_async(lseg);
}
}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);

static u64
end_offset(u64 start, u64 len)
{
u64 end;

end = start + len;
return end >= start ? end : NFS4_MAX_UINT64;
}

/*
* is l2 fully contained in l1?
* start1 end1

@@ -510,33 +577,13 @@ pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
const struct pnfs_layout_range *l2)
{
u64 start1 = l1->offset;
u64 end1 = end_offset(start1, l1->length);
u64 end1 = pnfs_end_offset(start1, l1->length);
u64 start2 = l2->offset;
u64 end2 = end_offset(start2, l2->length);
u64 end2 = pnfs_end_offset(start2, l2->length);

return (start1 <= start2) && (end1 >= end2);
}

/*
* is l1 and l2 intersecting?
* start1 end1
* [----------------------------------)
* start2 end2
* [----------------)
*/
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
const struct pnfs_layout_range *l2)
{
u64 start1 = l1->offset;
u64 end1 = end_offset(start1, l1->length);
u64 start2 = l2->offset;
u64 end2 = end_offset(start2, l2->length);

return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
(end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
struct list_head *tmp_list)
{

@@ -637,6 +684,20 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
return remaining;
}

static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
struct list_head *free_me,
const struct pnfs_layout_range *range,
u32 seq)
{
struct pnfs_layout_segment *lseg, *next;

list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
if (pnfs_match_lseg_recall(lseg, range, seq))
list_move_tail(&lseg->pls_list, free_me);
}
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)

@@ -701,6 +762,8 @@ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
struct inode *inode;

list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
continue;
inode = igrab(lo->plh_inode);
if (inode == NULL)
continue;

@@ -816,14 +879,6 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
pnfs_destroy_layouts_byclid(clp, false);
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
lo->plh_return_iomode = 0;
lo->plh_return_seq = 0;
clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,

@@ -941,12 +996,31 @@ static void pnfs_clear_layoutcommit(struct inode *inode,
}
}

void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
const nfs4_stateid *arg_stateid,
const struct pnfs_layout_range *range,
const nfs4_stateid *stateid)
{
clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
smp_mb__after_atomic();
wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
struct inode *inode = lo->plh_inode;
LIST_HEAD(freeme);

spin_lock(&inode->i_lock);
if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
!nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
goto out_unlock;
if (stateid) {
u32 seq = be32_to_cpu(arg_stateid->seqid);

pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
pnfs_free_returned_lsegs(lo, &freeme, range, seq);
pnfs_set_layout_stateid(lo, stateid, true);
} else
pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
pnfs_clear_layoutreturn_waitbit(lo);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&freeme);

}

static bool

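For reference, a condensed sketch of how a caller now drives return-on-close, mirroring the nfs4_do_close() and delegreturn hunks earlier; this is a recap, not new code in the patch. pnfs_roc() fills the LAYOUTRETURN args/res so the op can ride in the CLOSE or DELEGRETURN compound, and pnfs_roc_release() consumes the outcome:

    /* at setup time */
    calldata->lr.arg.ld_private = &calldata->lr.ld_private;
    calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
    calldata->lr.roc = pnfs_roc(inode, &calldata->lr.arg,
                                &calldata->lr.res, cred);
    if (calldata->lr.roc) {
            /* piggyback the LAYOUTRETURN on the compound */
            calldata->arg.lr_args = &calldata->lr.arg;
            calldata->res.lr_res = &calldata->lr.res;
    }

    /* in the rpc_release callback */
    if (calldata->lr.roc)
            pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
                             calldata->res.lr_ret);
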
@ -957,8 +1031,9 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
|
|||
/* Serialise LAYOUTGET/LAYOUTRETURN */
|
||||
if (atomic_read(&lo->plh_outstanding) != 0)
|
||||
return false;
|
||||
if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
|
||||
if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
|
||||
return false;
|
||||
set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
|
||||
pnfs_get_layout_hdr(lo);
|
||||
if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
|
||||
if (stateid != NULL) {
@@ -978,11 +1053,29 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
     return true;
 }

+static void
+pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
+        struct pnfs_layout_hdr *lo,
+        const nfs4_stateid *stateid,
+        enum pnfs_iomode iomode)
+{
+    struct inode *inode = lo->plh_inode;
+
+    args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
+    args->inode = inode;
+    args->range.iomode = iomode;
+    args->range.offset = 0;
+    args->range.length = NFS4_MAX_UINT64;
+    args->layout = lo;
+    nfs4_stateid_copy(&args->stateid, stateid);
+}
+
 static int
 pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
         enum pnfs_iomode iomode, bool sync)
 {
     struct inode *ino = lo->plh_inode;
+    struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
     struct nfs4_layoutreturn *lrp;
     int status = 0;

@@ -996,15 +1089,12 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
         goto out;
     }

-    nfs4_stateid_copy(&lrp->args.stateid, stateid);
-    lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
-    lrp->args.inode = ino;
-    lrp->args.range.iomode = iomode;
-    lrp->args.range.offset = 0;
-    lrp->args.range.length = NFS4_MAX_UINT64;
-    lrp->args.layout = lo;
+    pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
+    lrp->args.ld_private = &lrp->ld_private;
     lrp->clp = NFS_SERVER(ino)->nfs_client;
     lrp->cred = lo->plh_lc_cred;
+    if (ld->prepare_layoutreturn)
+        ld->prepare_layoutreturn(&lrp->args);

     status = nfs4_proc_layoutreturn(lrp, sync);
 out:

@@ -1067,7 +1157,7 @@ _pnfs_return_layout(struct inode *ino)
     struct nfs_inode *nfsi = NFS_I(ino);
     LIST_HEAD(tmp_list);
     nfs4_stateid stateid;
-    int status = 0, empty;
+    int status = 0;
     bool send;

     dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

@@ -1081,7 +1171,14 @@ _pnfs_return_layout(struct inode *ino)
     }
     /* Reference matched in nfs4_layoutreturn_release */
     pnfs_get_layout_hdr(lo);
-    empty = list_empty(&lo->plh_segs);
+    /* Is there an outstanding layoutreturn ? */
+    if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
+        spin_unlock(&ino->i_lock);
+        if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
+                    TASK_UNINTERRUPTIBLE))
+            goto out_put_layout_hdr;
+        spin_lock(&ino->i_lock);
+    }
     pnfs_clear_layoutcommit(ino, &tmp_list);
     pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

@@ -1095,7 +1192,7 @@ _pnfs_return_layout(struct inode *ino)
     }

     /* Don't send a LAYOUTRETURN if list was initially empty */
-    if (empty) {
+    if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
         spin_unlock(&ino->i_lock);
         dprintk("NFS: %s no layout segments to return\n", __func__);
         goto out_put_layout_hdr;

@@ -1141,21 +1238,36 @@ pnfs_commit_and_return_layout(struct inode *inode)
     return ret;
 }

-bool pnfs_roc(struct inode *ino)
+bool pnfs_roc(struct inode *ino,
+        struct nfs4_layoutreturn_args *args,
+        struct nfs4_layoutreturn_res *res,
+        const struct rpc_cred *cred)
 {
     struct nfs_inode *nfsi = NFS_I(ino);
     struct nfs_open_context *ctx;
     struct nfs4_state *state;
     struct pnfs_layout_hdr *lo;
-    struct pnfs_layout_segment *lseg, *tmp;
+    struct pnfs_layout_segment *lseg, *next;
     nfs4_stateid stateid;
-    LIST_HEAD(tmp_list);
-    bool found = false, layoutreturn = false, roc = false;
+    enum pnfs_iomode iomode = 0;
+    bool layoutreturn = false, roc = false;

     if (!nfs_have_layout(ino))
         return false;
+retry:
     spin_lock(&ino->i_lock);
     lo = nfsi->layout;
-    if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
+    if (!lo || !pnfs_layout_is_valid(lo) ||
+        test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
         goto out_noroc;
+    if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
+        pnfs_get_layout_hdr(lo);
+        spin_unlock(&ino->i_lock);
+        wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
+                TASK_UNINTERRUPTIBLE);
+        pnfs_put_layout_hdr(lo);
+        goto retry;
+    }

     /* no roc if we hold a delegation */
     if (nfs4_check_delegation(ino, FMODE_READ))

@@ -1168,78 +1280,73 @@ bool pnfs_roc(struct inode *ino)
         goto out_noroc;
     }

-    /* always send layoutreturn if being marked so */
-    if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
-        layoutreturn = pnfs_prepare_layoutreturn(lo,
-                &stateid, NULL);
-
-    list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
+    list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
         /* If we are sending layoutreturn, invalidate all valid lsegs */
-        if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
-            mark_lseg_invalid(lseg, &tmp_list);
-            found = true;
-        }
+        if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
+            continue;
+        /*
+         * Note: mark lseg for return so pnfs_layout_remove_lseg
+         * doesn't invalidate the layout for us.
+         */
+        set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
+        if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
+            continue;
+        pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
+    }
+
+    if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+        goto out_noroc;

     /* ROC in two conditions:
      * 1. there are ROC lsegs
      * 2. we don't send layoutreturn
      */
-    if (found && !layoutreturn) {
-        /* lo ref dropped in pnfs_roc_release() */
-        pnfs_get_layout_hdr(lo);
-        roc = true;
-    }
+    /* lo ref dropped in pnfs_roc_release() */
+    layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+    /* If the creds don't match, we can't compound the layoutreturn */
+    if (!layoutreturn || cred != lo->plh_lc_cred)
+        goto out_noroc;
+
+    roc = layoutreturn;
+    pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
+    res->lrs_present = 0;
+    layoutreturn = false;

 out_noroc:
     spin_unlock(&ino->i_lock);
-    pnfs_free_lseg_list(&tmp_list);
     pnfs_layoutcommit_inode(ino, true);
+    if (roc) {
+        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
+        if (ld->prepare_layoutreturn)
+            ld->prepare_layoutreturn(args);
+        return true;
+    }
     if (layoutreturn)
-        pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
-    return roc;
+        pnfs_send_layoutreturn(lo, &stateid, iomode, true);
+    return false;
 }

-void pnfs_roc_release(struct inode *ino)
+void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
+        struct nfs4_layoutreturn_res *res,
+        int ret)
 {
-    struct pnfs_layout_hdr *lo;
+    struct pnfs_layout_hdr *lo = args->layout;
+    const nfs4_stateid *arg_stateid = NULL;
+    const nfs4_stateid *res_stateid = NULL;
+    struct nfs4_xdr_opaque_data *ld_private = args->ld_private;

-    spin_lock(&ino->i_lock);
-    lo = NFS_I(ino)->layout;
-    pnfs_clear_layoutreturn_waitbit(lo);
-    if (atomic_dec_and_test(&lo->plh_refcount)) {
-        pnfs_detach_layout_hdr(lo);
-        spin_unlock(&ino->i_lock);
-        pnfs_free_layout_hdr(lo);
-    } else
-        spin_unlock(&ino->i_lock);
-}
-
-void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
-{
-    struct pnfs_layout_hdr *lo;
-
-    spin_lock(&ino->i_lock);
-    lo = NFS_I(ino)->layout;
-    if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
-        lo->plh_barrier = barrier;
-    spin_unlock(&ino->i_lock);
-    trace_nfs4_layoutreturn_on_close(ino, 0);
-}
-
-void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
-{
-    struct nfs_inode *nfsi = NFS_I(ino);
-    struct pnfs_layout_hdr *lo;
-    u32 current_seqid;
-
-    spin_lock(&ino->i_lock);
-    lo = nfsi->layout;
-    current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
-
-    /* Since close does not return a layout stateid for use as
-     * a barrier, we choose the worst-case barrier.
-     */
-    *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
-    spin_unlock(&ino->i_lock);
+    if (ret == 0) {
+        arg_stateid = &args->stateid;
+        if (res->lrs_present)
+            res_stateid = &res->stateid;
+    }
+    pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
+            res_stateid);
+    if (ld_private && ld_private->ops && ld_private->ops->free)
+        ld_private->ops->free(ld_private);
+    pnfs_put_layout_hdr(lo);
+    trace_nfs4_layoutreturn_on_close(args->inode, 0);
 }
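With the new signatures above, return-on-close becomes a compound operation: the CLOSE (or DELEGRETURN) path calls pnfs_roc() to prepare nfs4_layoutreturn_args that ride in the same RPC, then reports the server's answer back through pnfs_roc_release(). A hedged sketch of the calling convention, using the lr_args/lr_res/lr_ret fields added to nfs_closeargs and nfs_closeres in the nfs_xdr.h hunks further down (the container struct is hypothetical, not the kernel's exact nfs4_close code; NFS headers assumed):

struct my_close_data {                      /* hypothetical container */
    const struct rpc_cred *cred;
    struct nfs_closeargs arg;
    struct nfs_closeres res;
    struct nfs4_layoutreturn_args lr_args;
    struct nfs4_layoutreturn_res lr_res;
};

static void my_close_prepare(struct inode *inode, struct my_close_data *d)
{
    /* Piggy-back a LAYOUTRETURN on the CLOSE compound when possible */
    if (pnfs_roc(inode, &d->lr_args, &d->lr_res, d->cred)) {
        d->arg.lr_args = &d->lr_args;
        d->res.lr_res = &d->lr_res;
    }
}

static void my_close_release(struct my_close_data *d)
{
    if (d->arg.lr_args)   /* hand the outcome back to pNFS */
        pnfs_roc_release(&d->lr_args, &d->lr_res, d->res.lr_ret);
}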
 bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)

@@ -1252,13 +1359,11 @@ bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
      * i_lock */
     spin_lock(&ino->i_lock);
     lo = nfsi->layout;
-    if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
-        sleep = true;
-    spin_unlock(&ino->i_lock);
-
-    if (sleep)
+    if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
         rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-
+        sleep = true;
+    }
+    spin_unlock(&ino->i_lock);
     return sleep;
 }

@@ -1375,6 +1480,7 @@ alloc_init_layout_hdr(struct inode *ino,
     atomic_set(&lo->plh_refcount, 1);
     INIT_LIST_HEAD(&lo->plh_layouts);
     INIT_LIST_HEAD(&lo->plh_segs);
+    INIT_LIST_HEAD(&lo->plh_return_segs);
     INIT_LIST_HEAD(&lo->plh_bulk_destroy);
     lo->plh_inode = ino;
     lo->plh_lc_cred = get_rpccred(ctx->cred);

@@ -1841,7 +1947,10 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
         goto out_forget;
     }

-    if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
+    if (!pnfs_layout_is_valid(lo)) {
+        /* We have a completely new layout */
+        pnfs_set_layout_stateid(lo, &res->stateid, true);
+    } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
         /* existing state ID, make sure the sequence number matches. */
         if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
             dprintk("%s forget reply due to sequence\n", __func__);

@@ -1851,12 +1960,10 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
     } else {
         /*
          * We got an entirely new state ID. Mark all segments for the
-         * inode invalid, and don't bother validating the stateid
-         * sequence number.
+         * inode invalid, and retry the layoutget
          */
         pnfs_mark_layout_stateid_invalid(lo, &free_me);
-
-        pnfs_set_layout_stateid(lo, &res->stateid, true);
+        goto out_forget;
     }

     pnfs_get_lseg(lseg);

@@ -1877,20 +1984,6 @@ out_forget:
     return ERR_PTR(-EAGAIN);
 }

-static void
-pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
-        u32 seq)
-{
-    if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
-        iomode = IOMODE_ANY;
-    lo->plh_return_iomode = iomode;
-    set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-    if (seq != 0) {
-        WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
-        lo->plh_return_seq = seq;
-    }
-}
-
 /**
  * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
  * @lo: pointer to layout header

@@ -1945,17 +2038,18 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
         .offset = 0,
         .length = NFS4_MAX_UINT64,
     };
-    LIST_HEAD(free_me);
     bool return_now = false;

     spin_lock(&inode->i_lock);
     pnfs_set_plh_return_info(lo, range.iomode, 0);
+    /* Block LAYOUTGET */
+    set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
     /*
      * mark all matching lsegs so that we are sure to have no live
      * segments at hand when sending layoutreturn. See pnfs_put_lseg()
      * for how it works.
      */
-    if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0)) {
+    if (!pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0)) {
         nfs4_stateid stateid;
         enum pnfs_iomode iomode;

@@ -1967,7 +2061,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
         spin_unlock(&inode->i_lock);
         nfs_commit_inode(inode, 0);
     }
-    pnfs_free_lseg_list(&free_me);
 }
 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);

@@ -2063,7 +2156,7 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
      *
      */
     if (pgio->pg_lseg) {
-        seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
+        seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
                      pgio->pg_lseg->pls_range.length);
         req_start = req_offset(req);
         WARN_ON_ONCE(req_start >= seg_end);

@@ -2286,6 +2379,10 @@ void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
     struct nfs_pageio_descriptor pgio;

     if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+        /* Prevent deadlocks with layoutreturn! */
+        pnfs_put_lseg(hdr->lseg);
+        hdr->lseg = NULL;
+
         nfs_pageio_init_read(&pgio, hdr->inode, false,
                     hdr->completion_ops);
         hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);

@@ -96,6 +96,7 @@ enum {
     NFS_LAYOUT_RW_FAILED,        /* get rw layout failed stop trying */
     NFS_LAYOUT_BULK_RECALL,      /* bulk recall affecting layout */
     NFS_LAYOUT_RETURN,           /* layoutreturn in progress */
+    NFS_LAYOUT_RETURN_LOCK,      /* Serialise layoutreturn */
     NFS_LAYOUT_RETURN_REQUESTED, /* Return this layout ASAP */
     NFS_LAYOUT_INVALID_STID,     /* layout stateid id is invalid */
     NFS_LAYOUT_FIRST_LAYOUTGET,  /* Serialize first layoutget */

@@ -171,8 +172,8 @@ struct pnfs_layoutdriver_type {
             (struct nfs_server *server, struct pnfs_device *pdev,
             gfp_t gfp_flags);

-    void (*encode_layoutreturn) (struct pnfs_layout_hdr *layoutid,
-            struct xdr_stream *xdr,
+    int (*prepare_layoutreturn) (struct nfs4_layoutreturn_args *);
+    void (*encode_layoutreturn) (struct xdr_stream *xdr,
             const struct nfs4_layoutreturn_args *args);

     void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data);

@@ -181,7 +182,6 @@ struct pnfs_layoutdriver_type {
             struct xdr_stream *xdr,
             const struct nfs4_layoutcommit_args *args);
     int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args);
-    void (*cleanup_layoutstats) (struct nfs42_layoutstat_data *data);
 };

 struct pnfs_layout_hdr {

@@ -190,6 +190,7 @@ struct pnfs_layout_hdr {
     struct list_head plh_layouts;     /* other client layouts */
     struct list_head plh_bulk_destroy;
     struct list_head plh_segs;        /* layout segments list */
+    struct list_head plh_return_segs; /* invalid layout segments */
     unsigned long plh_block_lgets;    /* block LAYOUTGET if >0 */
     unsigned long plh_retry_timestamp;
     unsigned long plh_flags;

@@ -270,10 +271,13 @@ int pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
                 u32 seq);
 int pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
         struct list_head *lseg_list);
-bool pnfs_roc(struct inode *ino);
-void pnfs_roc_release(struct inode *ino);
-void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
-void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier);
+bool pnfs_roc(struct inode *ino,
+        struct nfs4_layoutreturn_args *args,
+        struct nfs4_layoutreturn_res *res,
+        const struct rpc_cred *cred);
+void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
+        struct nfs4_layoutreturn_res *res,
+        int ret);
 bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task);
 void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t);
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);

@@ -292,7 +296,10 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
                 enum pnfs_iomode iomode,
                 bool strict_iomode,
                 gfp_t gfp_flags);
-void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo);
+void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+        const nfs4_stateid *arg_stateid,
+        const struct pnfs_layout_range *range,
+        const nfs4_stateid *stateid);

 void pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
         struct pnfs_layout_segment *lseg,

@@ -362,8 +369,7 @@ struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
 void nfs4_pnfs_v3_ds_connect_unload(void);
 void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
         struct nfs4_deviceid_node *devid, unsigned int timeo,
-        unsigned int retrans, u32 version, u32 minor_version,
-        rpc_authflavor_t au_flavor);
+        unsigned int retrans, u32 version, u32 minor_version);
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
         struct xdr_stream *xdr,
         gfp_t gfp_flags);

@@ -559,6 +565,38 @@ pnfs_copy_range(struct pnfs_layout_range *dst,
     memcpy(dst, src, sizeof(*dst));
 }

+static inline u64
+pnfs_end_offset(u64 start, u64 len)
+{
+    if (NFS4_MAX_UINT64 - start <= len)
+        return NFS4_MAX_UINT64;
+    return start + len;
+}
+
+/*
+ * Are 2 ranges intersecting?
+ *   start1                             end1
+ *   [----------------------------------)
+ *           start2           end2
+ *           [----------------)
+ */
+static inline bool
+pnfs_is_range_intersecting(u64 start1, u64 end1, u64 start2, u64 end2)
+{
+    return (end1 == NFS4_MAX_UINT64 || start2 < end1) &&
+        (end2 == NFS4_MAX_UINT64 || start1 < end2);
+}
+
+static inline bool
+pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
+        const struct pnfs_layout_range *l2)
+{
+    u64 end1 = pnfs_end_offset(l1->offset, l1->length);
+    u64 end2 = pnfs_end_offset(l2->offset, l2->length);
+
+    return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2);
+}
+
 extern unsigned int layoutstats_timer;

 #ifdef NFS_DEBUG
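The helpers added above make the range arithmetic overflow-safe: a length of NFS4_MAX_UINT64 means "to EOF", so pnfs_end_offset() saturates rather than wraps, and the intersection test treats a saturated end as unbounded. A small userspace-style usage sketch (stdint stand-ins for the kernel types):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MAX_U64 UINT64_MAX            /* stand-in for NFS4_MAX_UINT64 */

static uint64_t end_offset(uint64_t start, uint64_t len)
{
    if (MAX_U64 - start <= len)
        return MAX_U64;               /* saturate: range runs to EOF */
    return start + len;
}

static bool intersecting(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
{
    return (e1 == MAX_U64 || s2 < e1) && (e2 == MAX_U64 || s1 < e2);
}

int main(void)
{
    /* half-open ranges: [0, 4096) and [4096, EOF) do not overlap */
    assert(!intersecting(0, end_offset(0, 4096),
                         4096, end_offset(4096, MAX_U64)));
    /* [0, 8192) and [4096, EOF) do */
    assert(intersecting(0, end_offset(0, 8192),
                        4096, end_offset(4096, MAX_U64)));
    return 0;
}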
@@ -630,23 +668,18 @@ pnfs_layoutcommit_outstanding(struct inode *inode)


 static inline bool
-pnfs_roc(struct inode *ino)
+pnfs_roc(struct inode *ino,
+        struct nfs4_layoutreturn_args *args,
+        struct nfs4_layoutreturn_res *res,
+        const struct rpc_cred *cred)
 {
     return false;
 }

 static inline void
-pnfs_roc_release(struct inode *ino)
-{
-}
-
-static inline void
-pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
-{
-}
-
-static inline void
-pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
+pnfs_roc_release(struct nfs4_layoutreturn_args *args,
+        struct nfs4_layoutreturn_res *res,
+        int ret)
 {
 }

@@ -600,8 +600,7 @@ static struct nfs_client *(*get_v3_ds_connect)(
             int ds_addrlen,
             int ds_proto,
             unsigned int ds_timeo,
-            unsigned int ds_retrans,
-            rpc_authflavor_t au_flavor);
+            unsigned int ds_retrans);

 static bool load_v3_ds_connect(void)
 {

@@ -625,15 +624,13 @@ EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload);
 static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
                  struct nfs4_pnfs_ds *ds,
                  unsigned int timeo,
-                 unsigned int retrans,
-                 rpc_authflavor_t au_flavor)
+                 unsigned int retrans)
 {
     struct nfs_client *clp = ERR_PTR(-EIO);
     struct nfs4_pnfs_ds_addr *da;
     int status = 0;

-    dprintk("--> %s DS %s au_flavor %d\n", __func__,
-        ds->ds_remotestr, au_flavor);
+    dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

     if (!load_v3_ds_connect())
         goto out;

@@ -657,7 +654,7 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
             clp = get_v3_ds_connect(mds_srv,
                     (struct sockaddr *)&da->da_addr,
                     da->da_addrlen, IPPROTO_TCP,
-                    timeo, retrans, au_flavor);
+                    timeo, retrans);
         }

         if (IS_ERR(clp)) {

@@ -676,15 +673,13 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
                  struct nfs4_pnfs_ds *ds,
                  unsigned int timeo,
                  unsigned int retrans,
-                 u32 minor_version,
-                 rpc_authflavor_t au_flavor)
+                 u32 minor_version)
 {
     struct nfs_client *clp = ERR_PTR(-EIO);
     struct nfs4_pnfs_ds_addr *da;
     int status = 0;

-    dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
-        au_flavor);
+    dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

     list_for_each_entry(da, &ds->ds_addrs, da_node) {
         dprintk("%s: DS %s: trying address %s\n",

@@ -720,8 +715,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
             clp = nfs4_set_ds_client(mds_srv,
                     (struct sockaddr *)&da->da_addr,
                     da->da_addrlen, IPPROTO_TCP,
-                    timeo, retrans, minor_version,
-                    au_flavor);
+                    timeo, retrans, minor_version);
             if (IS_ERR(clp))
                 continue;

@@ -755,19 +749,17 @@ out:
  */
 void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
           struct nfs4_deviceid_node *devid, unsigned int timeo,
-          unsigned int retrans, u32 version,
-          u32 minor_version, rpc_authflavor_t au_flavor)
+          unsigned int retrans, u32 version, u32 minor_version)
 {
     if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
         int err = 0;

         if (version == 3) {
             err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
-                               retrans, au_flavor);
+                               retrans);
         } else if (version == 4) {
             err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
-                               retrans, minor_version,
-                               au_flavor);
+                               retrans, minor_version);
         } else {
             dprintk("%s: unsupported DS version %d\n", __func__,
                 version);

@@ -2904,7 +2904,7 @@ module_param(max_session_slots, ushort, 0644);
 MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
         "requests the client will negotiate");
 module_param(max_session_cb_slots, ushort, 0644);
-MODULE_PARM_DESC(max_session_slots, "Maximum number of parallel NFSv4.1 "
+MODULE_PARM_DESC(max_session_cb_slots, "Maximum number of parallel NFSv4.1 "
         "callbacks the client will process for a given server");
 module_param(send_implementation_id, ushort, 0644);
 MODULE_PARM_DESC(send_implementation_id,

@@ -1151,8 +1151,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
         if (l_ctx && flctx &&
             !(list_empty_careful(&flctx->flc_posix) &&
               list_empty_careful(&flctx->flc_flock))) {
-            do_flush |= l_ctx->lockowner.l_owner != current->files
-                || l_ctx->lockowner.l_pid != current->tgid;
+            do_flush |= l_ctx->lockowner != current->files;
         }
         nfs_release_request(req);
         if (!do_flush)

@@ -440,6 +440,7 @@ enum lock_type4 {
 #define FATTR4_WORD2_MDSTHRESHOLD   (1UL << 4)
 #define FATTR4_WORD2_CLONE_BLKSIZE  (1UL << 13)
 #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
+#define FATTR4_WORD2_MODE_UMASK     (1UL << 17)

 /* MDS threshold bitmap bits */
 #define THRESHOLD_RD (1UL << 0)

@@ -55,22 +55,18 @@ struct nfs_access_entry {
     struct rcu_head rcu_head;
 };

-struct nfs_lockowner {
-    fl_owner_t l_owner;
-    pid_t l_pid;
-};
-
 struct nfs_lock_context {
     atomic_t count;
     struct list_head list;
     struct nfs_open_context *open_context;
-    struct nfs_lockowner lockowner;
+    fl_owner_t lockowner;
     atomic_t io_count;
 };

 struct nfs4_state;
 struct nfs_open_context {
     struct nfs_lock_context lock_context;
+    fl_owner_t flock_owner;
     struct dentry *dentry;
     struct rpc_cred *cred;
     struct nfs4_state *state;

@@ -349,6 +345,7 @@ extern int nfs_attribute_cache_expired(struct inode *inode);
 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
 extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
 extern int nfs_revalidate_mapping_rcu(struct inode *inode);
 extern int nfs_setattr(struct dentry *, struct iattr *);

@@ -358,7 +355,7 @@ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
 extern void put_nfs_open_context(struct nfs_open_context *ctx);
 extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
-extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
+extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp);
 extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
 extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
 extern void nfs_file_clear_open_context(struct file *flip);

@@ -216,6 +216,20 @@ struct nfs4_get_lease_time_res {
     struct nfs_fsinfo *lr_fsinfo;
 };

+struct xdr_stream;
+struct nfs4_xdr_opaque_data;
+
+struct nfs4_xdr_opaque_ops {
+    void (*encode)(struct xdr_stream *, const void *args,
+            const struct nfs4_xdr_opaque_data *);
+    void (*free)(struct nfs4_xdr_opaque_data *);
+};
+
+struct nfs4_xdr_opaque_data {
+    const struct nfs4_xdr_opaque_ops *ops;
+    void *data;
+};
+
 #define PNFS_LAYOUT_MAXSIZE 4096

 struct nfs4_layoutdriver_data {

@@ -306,6 +320,7 @@ struct nfs4_layoutreturn_args {
     struct pnfs_layout_range range;
     nfs4_stateid stateid;
     __u32 layout_type;
+    struct nfs4_xdr_opaque_data *ld_private;
 };

 struct nfs4_layoutreturn_res {

@@ -321,6 +336,7 @@ struct nfs4_layoutreturn {
     struct nfs_client *clp;
     struct inode *inode;
     int rpc_status;
+    struct nfs4_xdr_opaque_data ld_private;
 };

 #define PNFS_LAYOUTSTATS_MAXSIZE 256

@@ -341,8 +357,7 @@ struct nfs42_layoutstat_devinfo {
     __u64 write_count;
     __u64 write_bytes;
     __u32 layout_type;
-    layoutstats_encode_t layoutstats_encode;
-    void *layout_private;
+    struct nfs4_xdr_opaque_data ld_private;
 };

 struct nfs42_layoutstat_args {

@@ -418,6 +433,7 @@ struct nfs_openargs {
     enum open_claim_type4 claim;
     enum createmode4 createmode;
     const struct nfs4_label *label;
+    umode_t umask;
 };

 struct nfs_openres {

@@ -469,6 +485,7 @@ struct nfs_closeargs {
     fmode_t fmode;
     u32 share_access;
     const u32 * bitmask;
+    struct nfs4_layoutreturn_args *lr_args;
 };

 struct nfs_closeres {

@@ -477,6 +494,8 @@ struct nfs_closeres {
     struct nfs_fattr * fattr;
     struct nfs_seqid * seqid;
     const struct nfs_server *server;
+    struct nfs4_layoutreturn_res *lr_res;
+    int lr_ret;
 };
 /*
  *  * Arguments to the lock,lockt, and locku call.

@@ -549,12 +568,15 @@ struct nfs4_delegreturnargs {
     const struct nfs_fh *fhandle;
     const nfs4_stateid *stateid;
     const u32 * bitmask;
+    struct nfs4_layoutreturn_args *lr_args;
 };

 struct nfs4_delegreturnres {
     struct nfs4_sequence_res seq_res;
     struct nfs_fattr * fattr;
     struct nfs_server *server;
+    struct nfs4_layoutreturn_res *lr_res;
+    int lr_ret;
 };

 /*

@@ -937,6 +959,7 @@ struct nfs4_create_arg {
     const struct nfs_fh * dir_fh;
     const u32 * bitmask;
     const struct nfs4_label *label;
+    umode_t umask;
 };

 struct nfs4_create_res {

@@ -541,9 +541,13 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
         return gss_new;
     gss_msg = gss_add_msg(gss_new);
     if (gss_msg == gss_new) {
-        int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
+        int res;
+        atomic_inc(&gss_msg->count);
+        res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
         if (res) {
             gss_unhash_msg(gss_new);
+            atomic_dec(&gss_msg->count);
             gss_release_msg(gss_new);
             gss_msg = ERR_PTR(res);
         }
     } else

@@ -836,6 +840,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
             warn_gssd();
         gss_release_msg(gss_msg);
     }
+    gss_release_msg(gss_msg);
 }

 static void gss_pipe_dentry_destroy(struct dentry *dir,
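The two auth_gss hunks above fix the upcall refcounting: a reference is taken before the message is handed to rpc_queue_upcall() (the queue must own one), dropped again if queueing fails, and gss_pipe_destroy_msg() now releases the queue's reference unconditionally rather than only on the retry path. The underlying rule, as a tiny generic sketch (hypothetical names, not the SUNRPC API):

#include <stdatomic.h>

struct msg {
    atomic_int count;                 /* stand-in for gss_msg->count */
};

/* Pin an object before publishing a pointer to it in another context;
 * on failure the pointer was never published, so give the ref back. */
static int publish(struct msg *m, int (*queue)(struct msg *))
{
    int res;

    atomic_fetch_add(&m->count, 1);   /* reference owned by the queue */
    res = queue(m);
    if (res)
        atomic_fetch_sub(&m->count, 1);
    return res;
}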
@@ -1926,6 +1926,8 @@ call_connect_status(struct rpc_task *task)
     case -EADDRINUSE:
     case -ENOBUFS:
     case -EPIPE:
+        xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
+                        task->tk_rqstp->rq_connect_cookie);
         if (RPC_IS_SOFTCONN(task))
             break;
         /* retry with existing socket, after a delay */

@@ -157,15 +157,17 @@ void rpc_count_iostats_metrics(const struct rpc_task *task,
     spin_lock(&op_metrics->om_lock);

     op_metrics->om_ops++;
-    op_metrics->om_ntrans += req->rq_ntrans;
+    /* kernel API: om_ops must never become larger than om_ntrans */
+    op_metrics->om_ntrans += max(req->rq_ntrans, 1);
     op_metrics->om_timeouts += task->tk_timeouts;

     op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
     op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;

-    delta = ktime_sub(req->rq_xtime, task->tk_start);
-    op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta);
-
+    if (ktime_to_ns(req->rq_xtime)) {
+        delta = ktime_sub(req->rq_xtime, task->tk_start);
+        op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta);
+    }
     op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

     delta = ktime_sub(now, task->tk_start);
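The max(req->rq_ntrans, 1) change preserves the documented invariant that om_ops never exceeds om_ntrans, even for an RPC that completed without ever being transmitted (rq_ntrans == 0); the new ktime_to_ns() test likewise skips queue-time accounting for requests whose rq_xtime was never set. Illustration:

#include <assert.h>

/* max(rq_ntrans, 1), as in the hunk above */
static unsigned long ntrans_delta(int rq_ntrans)
{
    return rq_ntrans > 1 ? rq_ntrans : 1;
}

int main(void)
{
    unsigned long om_ops = 0, om_ntrans = 0;

    om_ops++;                          /* one completed op ...        */
    om_ntrans += ntrans_delta(0);      /* ... with zero transmissions */
    assert(om_ops <= om_ntrans);       /* invariant still holds       */
    return 0;
}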
@@ -669,7 +669,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
     spin_lock_bh(&xprt->transport_lock);
     if (cookie != xprt->connect_cookie)
         goto out;
-    if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
+    if (test_bit(XPRT_CLOSING, &xprt->state))
         goto out;
     set_bit(XPRT_CLOSE_WAIT, &xprt->state);
     /* Try to schedule an autoclose RPC call */

@@ -772,6 +772,7 @@ void xprt_connect(struct rpc_task *task)
     if (!xprt_connected(xprt)) {
         task->tk_rqstp->rq_bytes_sent = 0;
         task->tk_timeout = task->tk_rqstp->rq_timeout;
+        task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
         rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

         if (test_bit(XPRT_CLOSING, &xprt->state))

@@ -55,7 +55,8 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
     if (IS_ERR(rb))
         goto out_fail;
     req->rl_sendbuf = rb;
-    xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, size);
+    xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
+             min_t(size_t, size, PAGE_SIZE));
     rpcrdma_set_xprtdata(rqst, req);
     return 0;

@@ -191,6 +192,7 @@ size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
     size_t maxmsg;

     maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
+    maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
     return maxmsg - RPCRDMA_HDRLEN_MIN;
 }

@@ -101,7 +101,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
     struct rpcrdma_frmr *f = &r->frmr;
     int rc;

-    f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG, depth);
+    f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
     if (IS_ERR(f->fr_mr))
         goto out_mr_err;

@@ -157,7 +157,7 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
         return rc;
     }

-    f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
+    f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
                    ia->ri_max_frmr_depth);
     if (IS_ERR(f->fr_mr)) {
         pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",

@@ -171,10 +171,6 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
 }

 /* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
- *
- * There's no recovery if this fails. The FRMR is abandoned, but
- * remains in rb_all. It will be cleaned up when the transport is
- * destroyed.
  */
 static void
 frwr_op_recover_mr(struct rpcrdma_mw *mw)

@@ -210,11 +206,16 @@ static int
 frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
          struct rpcrdma_create_data_internal *cdata)
 {
+    struct ib_device_attr *attrs = &ia->ri_device->attrs;
     int depth, delta;

+    ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
+    if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+        ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;
+
     ia->ri_max_frmr_depth =
             min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
-                  ia->ri_device->attrs.max_fast_reg_page_list_len);
+                  attrs->max_fast_reg_page_list_len);
     dprintk("RPC: %s: device's max FR page list len = %u\n",
         __func__, ia->ri_max_frmr_depth);
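IB_MR_TYPE_SG_GAPS memory regions (the SG_GAP device support called out in the cover letter) can register a scatterlist whose elements are not page-aligned, which is why frwr_op_map() below may skip its hole check on such devices. A sketch of the probe-once-then-allocate shape of the change (standard verbs API; the helper itself is hypothetical):

#include <rdma/ib_verbs.h>

static struct ib_mr *alloc_frwr_mr(struct ib_pd *pd,
                                   struct ib_device *device, int depth)
{
    enum ib_mr_type mrtype = IB_MR_TYPE_MEM_REG;

    /* Probe once: can this device register SGLs with gaps? */
    if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
        mrtype = IB_MR_TYPE_SG_GAPS;

    return ib_alloc_mr(pd, mrtype, depth);
}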
@@ -241,8 +242,8 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
     }

     ep->rep_attr.cap.max_send_wr *= depth;
-    if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
-        cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
+    if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
+        cdata->max_requests = attrs->max_qp_wr / depth;
         if (!cdata->max_requests)
             return -EINVAL;
         ep->rep_attr.cap.max_send_wr = cdata->max_requests *

@@ -348,6 +349,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
         int nsegs, bool writing, struct rpcrdma_mw **out)
 {
     struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+    bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
     struct rpcrdma_mw *mw;
     struct rpcrdma_frmr *frmr;
     struct ib_mr *mr;

@@ -383,8 +385,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,

         ++seg;
         ++i;
-
-        /* Check for holes */
+        if (holes_ok)
+            continue;
         if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
             offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
             break;

@@ -421,7 +423,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
              IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
              IB_ACCESS_REMOTE_READ;

-    DECR_CQCOUNT(&r_xprt->rx_ep);
+    rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
     rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
     if (rc)
         goto out_senderr;

@@ -451,26 +453,6 @@ out_senderr:
     return -ENOTCONN;
 }

-static struct ib_send_wr *
-__frwr_prepare_linv_wr(struct rpcrdma_mw *mw)
-{
-    struct rpcrdma_frmr *f = &mw->frmr;
-    struct ib_send_wr *invalidate_wr;
-
-    dprintk("RPC: %s: invalidating frmr %p\n", __func__, f);
-
-    f->fr_state = FRMR_IS_INVALID;
-    invalidate_wr = &f->fr_invwr;
-
-    memset(invalidate_wr, 0, sizeof(*invalidate_wr));
-    f->fr_cqe.done = frwr_wc_localinv;
-    invalidate_wr->wr_cqe = &f->fr_cqe;
-    invalidate_wr->opcode = IB_WR_LOCAL_INV;
-    invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;
-
-    return invalidate_wr;
-}
-
 /* Invalidate all memory regions that were registered for "req".
  *
  * Sleeps until it is safe for the host CPU to access the

@@ -481,12 +463,12 @@ __frwr_prepare_linv_wr(struct rpcrdma_mw *mw)
 static void
 frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
-    struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
+    struct ib_send_wr *first, **prev, *last, *bad_wr;
     struct rpcrdma_rep *rep = req->rl_reply;
     struct rpcrdma_ia *ia = &r_xprt->rx_ia;
     struct rpcrdma_mw *mw, *tmp;
     struct rpcrdma_frmr *f;
-    int rc;
+    int count, rc;

     dprintk("RPC: %s: req %p\n", __func__, req);

@@ -496,22 +478,29 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
      * a single ib_post_send() call.
      */
     f = NULL;
-    invalidate_wrs = pos = prev = NULL;
+    count = 0;
+    prev = &first;
     list_for_each_entry(mw, &req->rl_registered, mw_list) {
+        mw->frmr.fr_state = FRMR_IS_INVALID;
+
         if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) &&
-            (mw->mw_handle == rep->rr_inv_rkey)) {
-            mw->frmr.fr_state = FRMR_IS_INVALID;
+            (mw->mw_handle == rep->rr_inv_rkey))
             continue;
-        }

-        pos = __frwr_prepare_linv_wr(mw);
+        f = &mw->frmr;
+        dprintk("RPC: %s: invalidating frmr %p\n",
+            __func__, f);

-        if (!invalidate_wrs)
-            invalidate_wrs = pos;
-        else
-            prev->next = pos;
-        prev = pos;
+        f->fr_cqe.done = frwr_wc_localinv;
+        last = &f->fr_invwr;
+        memset(last, 0, sizeof(*last));
+        last->wr_cqe = &f->fr_cqe;
+        last->opcode = IB_WR_LOCAL_INV;
+        last->ex.invalidate_rkey = mw->mw_handle;
+        count++;
+
+        *prev = last;
+        prev = &last->next;
     }
     if (!f)
         goto unmap;
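The rewritten loop above builds the WR chain with a pointer-to-pointer cursor (prev = &first; *prev = last; prev = &last->next;), which removes the old first-element special case and counts the WRs as it goes. The idiom in isolation:

#include <assert.h>
#include <stddef.h>

struct wr {
    struct wr *next;
    int id;
};

/* prev always addresses the slot the next element must fill,
 * so the head needs no special casing. */
static struct wr *chain(struct wr *pool, int n)
{
    struct wr *first, **prev = &first;
    int i;

    for (i = 0; i < n; i++) {
        *prev = &pool[i];         /* fill the pending slot */
        prev = &pool[i].next;     /* next slot to fill */
    }
    *prev = NULL;                 /* terminate (handles n == 0 too) */
    return first;
}

int main(void)
{
    struct wr pool[3] = { { 0, 0 }, { 0, 1 }, { 0, 2 } };
    struct wr *head = chain(pool, 3);

    assert(head->id == 0 && head->next->id == 1 && head->next->next->id == 2);
    assert(head->next->next->next == NULL);
    return 0;
}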
@@ -520,17 +509,22 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
      * last WR in the chain completes, all WRs in the chain
      * are complete.
      */
-    f->fr_invwr.send_flags = IB_SEND_SIGNALED;
+    last->send_flags = IB_SEND_SIGNALED;
     f->fr_cqe.done = frwr_wc_localinv_wake;
     reinit_completion(&f->fr_linv_done);
-    INIT_CQCOUNT(&r_xprt->rx_ep);
+
+    /* Initialize CQ count, since there is always a signaled
+     * WR being posted here. The new cqcount depends on how
+     * many SQEs are about to be consumed.
+     */
+    rpcrdma_init_cqcount(&r_xprt->rx_ep, count);

     /* Transport disconnect drains the receive CQ before it
      * replaces the QP. The RPC reply handler won't call us
      * unless ri_id->qp is a valid pointer.
      */
     r_xprt->rx_stats.local_inv_needed++;
-    rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
+    rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
     if (rc)
         goto reset_mrs;

@@ -541,7 +535,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
      */
 unmap:
     list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
-        dprintk("RPC: %s: unmapping frmr %p\n",
+        dprintk("RPC: %s: DMA unmapping frmr %p\n",
             __func__, &mw->frmr);
         list_del_init(&mw->mw_list);
         ib_dma_unmap_sg(ia->ri_device,

@@ -559,7 +553,7 @@ reset_mrs:
      */
     list_for_each_entry(mw, &req->rl_registered, mw_list) {
         f = &mw->frmr;
-        if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
+        if (mw->mw_handle == bad_wr->ex.invalidate_rkey) {
             __frwr_reset_mr(ia, mw);
             bad_wr = bad_wr->next;
         }

@@ -786,7 +786,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
         ifdebug(FACILITY) {
             u64 off;
             xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
-            dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
+            dprintk("RPC: %s: chunk %d@0x%016llx:0x%08x\n",
                 __func__,
                 be32_to_cpu(seg->rs_length),
                 (unsigned long long)off,

@@ -906,28 +906,6 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
     return fixup_copy_count;
 }

-void
-rpcrdma_connect_worker(struct work_struct *work)
-{
-    struct rpcrdma_ep *ep =
-        container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
-    struct rpcrdma_xprt *r_xprt =
-        container_of(ep, struct rpcrdma_xprt, rx_ep);
-    struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
-    spin_lock_bh(&xprt->transport_lock);
-    if (++xprt->connect_cookie == 0)    /* maintain a reserved value */
-        ++xprt->connect_cookie;
-    if (ep->rep_connected > 0) {
-        if (!xprt_test_and_set_connected(xprt))
-            xprt_wake_pending_tasks(xprt, 0);
-    } else {
-        if (xprt_test_and_clear_connected(xprt))
-            xprt_wake_pending_tasks(xprt, -ENOTCONN);
-    }
-    spin_unlock_bh(&xprt->transport_lock);
-}
-
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /* By convention, backchannel calls arrive via rdma_msg type
  * messages, and never populate the chunk lists. This makes

@@ -959,18 +937,6 @@ rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
 }
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */

-/*
- * This function is called when an async event is posted to
- * the connection which changes the connection state. All it
- * does at this point is mark the connection up/down, the rpc
- * timers do the rest.
- */
-void
-rpcrdma_conn_func(struct rpcrdma_ep *ep)
-{
-    schedule_delayed_work(&ep->rep_connect_worker, 0);
-}
-
 /* Process received RPC/RDMA messages.
  *
  * Errors must result in the RPC task either being awakened, or

@@ -219,6 +219,34 @@ xprt_rdma_free_addresses(struct rpc_xprt *xprt)
     }
 }

+void
+rpcrdma_conn_func(struct rpcrdma_ep *ep)
+{
+    schedule_delayed_work(&ep->rep_connect_worker, 0);
+}
+
+void
+rpcrdma_connect_worker(struct work_struct *work)
+{
+    struct rpcrdma_ep *ep =
+        container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
+    struct rpcrdma_xprt *r_xprt =
+        container_of(ep, struct rpcrdma_xprt, rx_ep);
+    struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+
+    spin_lock_bh(&xprt->transport_lock);
+    if (++xprt->connect_cookie == 0)    /* maintain a reserved value */
+        ++xprt->connect_cookie;
+    if (ep->rep_connected > 0) {
+        if (!xprt_test_and_set_connected(xprt))
+            xprt_wake_pending_tasks(xprt, 0);
+    } else {
+        if (xprt_test_and_clear_connected(xprt))
+            xprt_wake_pending_tasks(xprt, -ENOTCONN);
+    }
+    spin_unlock_bh(&xprt->transport_lock);
+}
+
 static void
 xprt_rdma_connect_worker(struct work_struct *work)
 {

@@ -621,7 +649,8 @@ xprt_rdma_free(struct rpc_task *task)

     dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);

-    ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
+    if (unlikely(!list_empty(&req->rl_registered)))
+        ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
     rpcrdma_unmap_sges(ia, req);
     rpcrdma_buffer_put(req);
 }

@@ -657,7 +686,8 @@ xprt_rdma_send_request(struct rpc_task *task)
     int rc = 0;

     /* On retransmit, remove any previously registered chunks */
-    r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
+    if (unlikely(!list_empty(&req->rl_registered)))
+        r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);

     rc = rpcrdma_marshal_req(rqst);
     if (rc < 0)

@@ -103,9 +103,9 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
 {
     struct rpcrdma_ep *ep = context;

-    pr_err("RPC: %s: %s on device %s ep %p\n",
-           __func__, ib_event_msg(event->event),
-        event->device->name, context);
+    pr_err("rpcrdma: %s on device %s ep %p\n",
+           ib_event_msg(event->event), event->device->name, context);
+
     if (ep->rep_connected == 1) {
         ep->rep_connected = -EIO;
         rpcrdma_conn_func(ep);

@@ -223,8 +223,8 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
         cdata->inline_rsize = rsize;
     if (wsize < cdata->inline_wsize)
         cdata->inline_wsize = wsize;
-    pr_info("rpcrdma: max send %u, max recv %u\n",
-        cdata->inline_wsize, cdata->inline_rsize);
+    dprintk("RPC: %s: max send %u, max recv %u\n",
+        __func__, cdata->inline_wsize, cdata->inline_rsize);
     rpcrdma_set_max_header_sizes(r_xprt);
 }

@@ -331,6 +331,7 @@ static struct rdma_cm_id *
 rpcrdma_create_id(struct rpcrdma_xprt *xprt,
             struct rpcrdma_ia *ia, struct sockaddr *addr)
 {
+    unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
     struct rdma_cm_id *id;
     int rc;

@@ -352,8 +353,12 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
             __func__, rc);
         goto out;
     }
-    wait_for_completion_interruptible_timeout(&ia->ri_done,
-                msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
+    rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
+    if (rc < 0) {
+        dprintk("RPC: %s: wait() exited: %i\n",
+            __func__, rc);
+        goto out;
+    }

     /* FIXME:
      * Until xprtrdma supports DEVICE_REMOVAL, the provider must

@@ -376,8 +381,12 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
             __func__, rc);
         goto put;
     }
-    wait_for_completion_interruptible_timeout(&ia->ri_done,
-                msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
+    rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
+    if (rc < 0) {
+        dprintk("RPC: %s: wait() exited: %i\n",
+            __func__, rc);
+        goto put;
+    }
     rc = ia->ri_async_rc;
     if (rc)
         goto put;
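Both hunks now check the result of wait_for_completion_interruptible_timeout(), whose long return value is three-way: negative (-ERESTARTSYS) when a signal interrupted the wait, zero on timeout, positive (the remaining jiffies) on completion. Only the interrupted case needs handling here, since ia->ri_async_rc carries the CM outcome; a sketch of the full decode for reference:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_resolved(struct completion *done, unsigned long timeout)
{
    long rc = wait_for_completion_interruptible_timeout(done, timeout);

    if (rc < 0)            /* interrupted by a signal */
        return rc;
    if (rc == 0)           /* timer expired before complete() */
        return -ETIMEDOUT;
    return 0;              /* completed; rc was the jiffies left */
}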
@@ -532,7 +541,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
     ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
     if (ep->rep_cqinit <= 2)
         ep->rep_cqinit = 0;    /* always signal? */
-    INIT_CQCOUNT(ep);
+    rpcrdma_init_cqcount(ep, 0);
     init_waitqueue_head(&ep->rep_connect_wait);
     INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

@@ -1311,13 +1320,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
     dprintk("RPC: %s: posting %d s/g entries\n",
         __func__, send_wr->num_sge);

-    if (DECR_CQCOUNT(ep) > 0)
-        send_wr->send_flags = 0;
-    else { /* Provider must take a send completion every now and then */
-        INIT_CQCOUNT(ep);
-        send_wr->send_flags = IB_SEND_SIGNALED;
-    }
-
+    rpcrdma_set_signaled(ep, send_wr);
     rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
     if (rc)
         goto out_postsend_err;

@@ -75,6 +75,7 @@ struct rpcrdma_ia {
     unsigned int ri_max_inline_write;
     unsigned int ri_max_inline_read;
     bool ri_reminv_expected;
+    enum ib_mr_type ri_mrtype;
     struct ib_qp_attr ri_qp_attr;
     struct ib_qp_init_attr ri_qp_init_attr;
 };

@@ -95,8 +96,24 @@ struct rpcrdma_ep {
     struct delayed_work rep_connect_worker;
 };

-#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
-#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
+static inline void
+rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
+{
+    atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
+}
+
+/* To update send queue accounting, provider must take a
+ * send completion every now and then.
+ */
+static inline void
+rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
+{
+    send_wr->send_flags = 0;
+    if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
+        rpcrdma_init_cqcount(ep, 0);
+        send_wr->send_flags = IB_SEND_SIGNALED;
+    }
+}

 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are
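Turning INIT_CQCOUNT/DECR_CQCOUNT into the two inline helpers above lets a caller that consumes several SQEs with a single signaled WR (as frwr_op_unmap_sync() now does with its invalidation chain) pre-charge the counter, so completions are still requested roughly once per rep_cqinit sends. Usage sketch (assumes an initialised rpcrdma_ep; not a complete kernel unit):

/* Ordinary send: let the helper decide whether to signal. */
static void post_one(struct rpcrdma_ep *ep, struct ib_send_wr *wr)
{
    rpcrdma_set_signaled(ep, wr);
}

/* Chain of nr_wrs WRs with only the tail signaled: pre-charge the
 * counter by the number of SQEs the chain consumes. */
static void post_chain(struct rpcrdma_ep *ep, struct ib_send_wr *tail,
                       int nr_wrs)
{
    tail->send_flags = IB_SEND_SIGNALED;
    rpcrdma_init_cqcount(ep, nr_wrs);
}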
@@ -473,6 +490,7 @@ int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
                 struct rpcrdma_create_data_internal *);
 void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
 int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
+void rpcrdma_conn_func(struct rpcrdma_ep *ep);
 void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,

@@ -531,13 +549,6 @@ rpcrdma_data_dir(bool writing)
     return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 }

-/*
- * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
- */
-void rpcrdma_connect_worker(struct work_struct *);
-void rpcrdma_conn_func(struct rpcrdma_ep *);
-void rpcrdma_reply_handler(struct work_struct *);
-
 /*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */

@@ -555,12 +566,14 @@ bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
 void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
 int rpcrdma_marshal_req(struct rpc_rqst *);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
+void rpcrdma_reply_handler(struct work_struct *work);

 /* RPC/RDMA module init - xprtrdma/transport.c
  */
 extern unsigned int xprt_rdma_max_inline_read;
 void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
 void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
+void rpcrdma_connect_worker(struct work_struct *work);
 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
 int xprt_rdma_init(void);
 void xprt_rdma_cleanup(void);