2019-05-19 20:08:55 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* linux/fs/nfs/dir.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 1992 Rick Sladkey
|
|
|
|
*
|
|
|
|
* nfs directory handling functions
|
|
|
|
*
|
|
|
|
* 10 Apr 1996 Added silly rename for unlink --okir
|
|
|
|
* 28 Sep 1996 Improved directory cache --okir
|
|
|
|
* 23 Aug 1997 Claus Heine claus@momo.math.rwth-aachen.de
|
|
|
|
* Re-implemented silly rename for unlink, newly implemented
|
|
|
|
* silly rename for nfs_rename() following the suggestions
|
|
|
|
* of Olaf Kirch (okir) found in this file.
|
|
|
|
* Following Linus comments on my original hack, this version
|
|
|
|
* depends only on the dcache stuff and doesn't touch the inode
|
|
|
|
* layer (iput() and friends).
|
|
|
|
* 6 Jun 1999 Cache readdir lookups in the page cache. -DaveM
|
|
|
|
*/
|
|
|
|
|
2021-12-29 08:49:13 +08:00
|
|
|
#include <linux/compat.h>
|
2012-07-31 04:05:23 +08:00
|
|
|
#include <linux/module.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/time.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/stat.h>
|
|
|
|
#include <linux/fcntl.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/sunrpc/clnt.h>
|
|
|
|
#include <linux/nfs_fs.h>
|
|
|
|
#include <linux/nfs_mount.h>
|
|
|
|
#include <linux/pagemap.h>
|
2006-08-23 08:06:23 +08:00
|
|
|
#include <linux/pagevec.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/namei.h>
|
NFS: Share NFS superblocks per-protocol per-server per-FSID
The attached patch makes NFS share superblocks between mounts from the same
server and FSID over the same protocol.
It does this by creating each superblock with a false root and returning the
real root dentry in the vfsmount presented by get_sb(). The root dentry set
starts off as an anonymous dentry if we don't already have the dentry for its
inode, otherwise it simply returns the dentry we already have.
We may thus end up with several trees of dentries in the superblock, and if at
some later point one of anonymous tree roots is discovered by normal filesystem
activity to be located in another tree within the superblock, the anonymous
root is named and materialises attached to the second tree at the appropriate
point.
Why do it this way? Why not pass an extra argument to the mount() syscall to
indicate the subpath and then pathwalk from the server root to the desired
directory? You can't guarantee this will work for two reasons:
(1) The root and intervening nodes may not be accessible to the client.
With NFS2 and NFS3, for instance, mountd is called on the server to get
the filehandle for the tip of a path. mountd won't give us handles for
anything we don't have permission to access, and so we can't set up NFS
inodes for such nodes, and so can't easily set up dentries (we'd have to
have ghost inodes or something).
With this patch we don't actually create dentries until we get handles
from the server that we can use to set up their inodes, and we don't
actually bind them into the tree until we know for sure where they go.
(2) Inaccessible symbolic links.
If we're asked to mount two exports from the server, eg:
mount warthog:/warthog/aaa/xxx /mmm
mount warthog:/warthog/bbb/yyy /nnn
We may not be able to access anything nearer the root than xxx and yyy,
but we may find out later that /mmm/www/yyy, say, is actually the same
directory as the one mounted on /nnn. What we might then find out, for
example, is that /warthog/bbb was actually a symbolic link to
/warthog/aaa/xxx/www, but we can't actually determine that by talking to
the server until /warthog is made available by NFS.
This would lead to having constructed an errneous dentry tree which we
can't easily fix. We can end up with a dentry marked as a directory when
it should actually be a symlink, or we could end up with an apparently
hardlinked directory.
With this patch we need not make assumptions about the type of a dentry
for which we can't retrieve information, nor need we assume we know its
place in the grand scheme of things until we actually see that place.
This patch reduces the possibility of aliasing in the inode and page caches for
inodes that may be accessed by more than one NFS export. It also reduces the
number of superblocks required for NFS where there are many NFS exports being
used from a server (home directory server + autofs for example).
This in turn makes it simpler to do local caching of network filesystems, as it
can then be guaranteed that there won't be links from multiple inodes in
separate superblocks to the same cache file.
Obviously, cache aliasing between different levels of NFS protocol could still
be a problem, but at least that gives us another key to use when indexing the
cache.
This patch makes the following changes:
(1) The server record construction/destruction has been abstracted out into
its own set of functions to make things easier to get right. These have
been moved into fs/nfs/client.c.
All the code in fs/nfs/client.c has to do with the management of
connections to servers, and doesn't touch superblocks in any way; the
remaining code in fs/nfs/super.c has to do with VFS superblock management.
(2) The sequence of events undertaken by NFS mount is now reordered:
(a) A volume representation (struct nfs_server) is allocated.
(b) A server representation (struct nfs_client) is acquired. This may be
allocated or shared, and is keyed on server address, port and NFS
version.
(c) If allocated, the client representation is initialised. The state
member variable of nfs_client is used to prevent a race during
initialisation from two mounts.
(d) For NFS4 a simple pathwalk is performed, walking from FH to FH to find
the root filehandle for the mount (fs/nfs/getroot.c). For NFS2/3 we
are given the root FH in advance.
(e) The volume FSID is probed for on the root FH.
(f) The volume representation is initialised from the FSINFO record
retrieved on the root FH.
(g) sget() is called to acquire a superblock. This may be allocated or
shared, keyed on client pointer and FSID.
(h) If allocated, the superblock is initialised.
(i) If the superblock is shared, then the new nfs_server record is
discarded.
(j) The root dentry for this mount is looked up from the root FH.
(k) The root dentry for this mount is assigned to the vfsmount.
(3) nfs_readdir_lookup() creates dentries for each of the entries readdir()
returns; this function now attaches disconnected trees from alternate
roots that happen to be discovered attached to a directory being read (in
the same way nfs_lookup() is made to do for lookup ops).
The new d_materialise_unique() function is now used to do this, thus
permitting the whole thing to be done under one set of locks, and thus
avoiding any race between mount and lookup operations on the same
directory.
(4) The client management code uses a new debug facility: NFSDBG_CLIENT which
is set by echoing 1024 to /proc/net/sunrpc/nfs_debug.
(5) Clone mounts are now called xdev mounts.
(6) Use the dentry passed to the statfs() op as the handle for retrieving fs
statistics rather than the root dentry of the superblock (which is now a
dummy).
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2006-08-23 08:06:13 +08:00
|
|
|
#include <linux/mount.h>
|
2013-07-04 06:02:32 +08:00
|
|
|
#include <linux/swap.h>
|
Detach sched.h from mm.h
First thing mm.h does is including sched.h solely for can_do_mlock() inline
function which has "current" dereference inside. By dealing with can_do_mlock()
mm.h can be detached from sched.h which is good. See below, why.
This patch
a) removes unconditional inclusion of sched.h from mm.h
b) makes can_do_mlock() normal function in mm/mlock.c
c) exports can_do_mlock() to not break compilation
d) adds sched.h inclusions back to files that were getting it indirectly.
e) adds less bloated headers to some files (asm/signal.h, jiffies.h) that were
getting them indirectly
Net result is:
a) mm.h users would get less code to open, read, preprocess, parse, ... if
they don't need sched.h
b) sched.h stops being dependency for significant number of files:
on x86_64 allmodconfig touching sched.h results in recompile of 4083 files,
after patch it's only 3744 (-8.3%).
Cross-compile tested on
all arm defconfigs, all mips defconfigs, all powerpc defconfigs,
alpha alpha-up
arm
i386 i386-up i386-defconfig i386-allnoconfig
ia64 ia64-up
m68k
mips
parisc parisc-up
powerpc powerpc-up
s390 s390-up
sparc sparc-up
sparc64 sparc64-up
um-x86_64
x86_64 x86_64-up x86_64-defconfig x86_64-allnoconfig
as well as my two usual configs.
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-21 05:22:52 +08:00
|
|
|
#include <linux/sched.h>
|
2010-11-11 20:53:47 +08:00
|
|
|
#include <linux/kmemleak.h>
|
2010-12-09 19:35:25 +08:00
|
|
|
#include <linux/xattr.h>
|
2022-03-31 08:00:07 +08:00
|
|
|
#include <linux/hash.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include "delegation.h"
|
2006-03-21 02:44:14 +08:00
|
|
|
#include "iostat.h"
|
2007-11-22 07:04:31 +08:00
|
|
|
#include "internal.h"
|
2010-09-17 22:56:50 +08:00
|
|
|
#include "fscache.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-08-20 06:59:33 +08:00
|
|
|
#include "nfstrace.h"
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* #define NFS_DEBUG_VERBOSE 1 */
|
|
|
|
|
|
|
|
static int nfs_opendir(struct inode *, struct file *);
|
2011-03-24 02:48:29 +08:00
|
|
|
static int nfs_closedir(struct inode *, struct file *);
|
2013-05-18 04:34:50 +08:00
|
|
|
static int nfs_readdir(struct file *, struct dir_context *);
|
2011-07-17 08:44:56 +08:00
|
|
|
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
|
2005-06-23 01:16:29 +08:00
|
|
|
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
|
2023-04-05 03:12:52 +08:00
|
|
|
static void nfs_readdir_clear_array(struct folio *);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-03-28 17:56:42 +08:00
|
|
|
const struct file_operations nfs_dir_operations = {
|
2005-06-23 01:16:29 +08:00
|
|
|
.llseek = nfs_llseek_dir,
|
2005-04-17 06:20:36 +08:00
|
|
|
.read = generic_read_dir,
|
2020-02-03 06:53:56 +08:00
|
|
|
.iterate_shared = nfs_readdir,
|
2005-04-17 06:20:36 +08:00
|
|
|
.open = nfs_opendir,
|
2011-03-24 02:48:29 +08:00
|
|
|
.release = nfs_closedir,
|
2005-04-17 06:20:36 +08:00
|
|
|
.fsync = nfs_fsync_dir,
|
|
|
|
};
|
|
|
|
|
2010-12-02 03:17:06 +08:00
|
|
|
const struct address_space_operations nfs_dir_aops = {
|
2023-04-05 03:12:52 +08:00
|
|
|
.free_folio = nfs_readdir_clear_array,
|
2010-09-25 02:48:42 +08:00
|
|
|
};
|
|
|
|
|
2022-02-08 02:37:00 +08:00
|
|
|
#define NFS_INIT_DTSIZE PAGE_SIZE
|
|
|
|
|
2022-02-23 01:10:36 +08:00
|
|
|
static struct nfs_open_dir_context *
|
|
|
|
alloc_nfs_open_dir_context(struct inode *dir)
|
2011-03-24 02:48:29 +08:00
|
|
|
{
|
2014-02-08 06:02:08 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(dir);
|
2011-03-24 02:48:29 +08:00
|
|
|
struct nfs_open_dir_context *ctx;
|
2022-02-23 01:10:36 +08:00
|
|
|
|
|
|
|
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
|
2011-03-24 02:48:29 +08:00
|
|
|
if (ctx != NULL) {
|
2014-02-08 06:02:08 +08:00
|
|
|
ctx->attr_gencount = nfsi->attr_gencount;
|
2022-02-08 02:37:00 +08:00
|
|
|
ctx->dtsize = NFS_INIT_DTSIZE;
|
2014-02-08 06:02:08 +08:00
|
|
|
spin_lock(&dir->i_lock);
|
2019-05-22 20:38:57 +08:00
|
|
|
if (list_empty(&nfsi->open_files) &&
|
|
|
|
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
|
2021-03-09 03:42:54 +08:00
|
|
|
nfs_set_cache_invalid(dir,
|
|
|
|
NFS_INO_INVALID_DATA |
|
|
|
|
NFS_INO_REVAL_FORCED);
|
2022-02-18 00:08:24 +08:00
|
|
|
list_add_tail_rcu(&ctx->list, &nfsi->open_files);
|
2022-02-25 23:22:30 +08:00
|
|
|
memcpy(ctx->verf, nfsi->cookieverf, sizeof(ctx->verf));
|
2014-02-08 06:02:08 +08:00
|
|
|
spin_unlock(&dir->i_lock);
|
2011-07-31 00:45:35 +08:00
|
|
|
return ctx;
|
|
|
|
}
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2011-03-24 02:48:29 +08:00
|
|
|
}
|
|
|
|
|
2014-02-08 06:02:08 +08:00
|
|
|
static void put_nfs_open_dir_context(struct inode *dir, struct nfs_open_dir_context *ctx)
|
2011-03-24 02:48:29 +08:00
|
|
|
{
|
2014-02-08 06:02:08 +08:00
|
|
|
spin_lock(&dir->i_lock);
|
2022-02-18 00:08:24 +08:00
|
|
|
list_del_rcu(&ctx->list);
|
2014-02-08 06:02:08 +08:00
|
|
|
spin_unlock(&dir->i_lock);
|
2022-02-18 00:08:24 +08:00
|
|
|
kfree_rcu(ctx, rcu_head);
|
2011-03-24 02:48:29 +08:00
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Open file
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
nfs_opendir(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2011-03-24 02:48:29 +08:00
|
|
|
int res = 0;
|
|
|
|
struct nfs_open_dir_context *ctx;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(FILE, "NFS: open dir(%pD2)\n", filp);
|
2008-06-12 05:55:42 +08:00
|
|
|
|
|
|
|
nfs_inc_stats(inode, NFSIOS_VFSOPEN);
|
2006-03-21 02:44:24 +08:00
|
|
|
|
2020-11-02 04:24:41 +08:00
|
|
|
ctx = alloc_nfs_open_dir_context(inode);
|
2011-03-24 02:48:29 +08:00
|
|
|
if (IS_ERR(ctx)) {
|
|
|
|
res = PTR_ERR(ctx);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
filp->private_data = ctx;
|
|
|
|
out:
|
2005-04-17 06:20:36 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2011-03-24 02:48:29 +08:00
|
|
|
static int
|
|
|
|
nfs_closedir(struct inode *inode, struct file *filp)
|
|
|
|
{
|
2014-10-22 08:11:25 +08:00
|
|
|
put_nfs_open_dir_context(file_inode(filp), filp->private_data);
|
2011-03-24 02:48:29 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
struct nfs_cache_array_entry {
|
|
|
|
u64 cookie;
|
|
|
|
u64 ino;
|
2020-11-02 08:17:29 +08:00
|
|
|
const char *name;
|
|
|
|
unsigned int name_len;
|
2010-11-21 03:26:44 +08:00
|
|
|
unsigned char d_type;
|
2010-09-25 02:48:42 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct nfs_cache_array {
|
2022-02-22 21:31:28 +08:00
|
|
|
u64 change_attr;
|
2010-09-25 02:48:42 +08:00
|
|
|
u64 last_cookie;
|
2020-11-02 02:45:55 +08:00
|
|
|
unsigned int size;
|
2023-04-05 03:12:52 +08:00
|
|
|
unsigned char folio_full : 1,
|
|
|
|
folio_is_eof : 1,
|
2020-11-04 21:32:19 +08:00
|
|
|
cookies_are_ordered : 1;
|
2020-03-10 02:24:42 +08:00
|
|
|
struct nfs_cache_array_entry array[];
|
2010-09-25 02:48:42 +08:00
|
|
|
};
|
|
|
|
|
2020-11-03 20:42:04 +08:00
|
|
|
struct nfs_readdir_descriptor {
|
2005-04-17 06:20:36 +08:00
|
|
|
struct file *file;
|
2023-04-05 00:05:22 +08:00
|
|
|
struct folio *folio;
|
2013-05-18 04:34:50 +08:00
|
|
|
struct dir_context *ctx;
|
2023-04-05 00:05:22 +08:00
|
|
|
pgoff_t folio_index;
|
|
|
|
pgoff_t folio_index_max;
|
2020-11-01 22:56:18 +08:00
|
|
|
u64 dir_cookie;
|
2010-12-01 10:56:32 +08:00
|
|
|
u64 last_cookie;
|
2005-06-23 01:16:29 +08:00
|
|
|
loff_t current_index;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2020-11-03 09:06:12 +08:00
|
|
|
__be32 verf[NFS_DIR_VERIFIER_SIZE];
|
2020-02-05 22:01:52 +08:00
|
|
|
unsigned long dir_verifier;
|
2007-04-16 07:35:27 +08:00
|
|
|
unsigned long timestamp;
|
2008-10-15 07:16:07 +08:00
|
|
|
unsigned long gencount;
|
2020-11-01 22:56:18 +08:00
|
|
|
unsigned long attr_gencount;
|
2010-09-25 02:48:42 +08:00
|
|
|
unsigned int cache_entry_index;
|
2022-02-08 02:37:00 +08:00
|
|
|
unsigned int buffer_fills;
|
|
|
|
unsigned int dtsize;
|
2022-02-24 02:29:59 +08:00
|
|
|
bool clear_cache;
|
2017-06-20 20:33:44 +08:00
|
|
|
bool plus;
|
2022-01-19 11:10:52 +08:00
|
|
|
bool eob;
|
2017-06-20 20:33:44 +08:00
|
|
|
bool eof;
|
2020-11-03 20:42:04 +08:00
|
|
|
};
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2022-02-08 02:37:00 +08:00
|
|
|
static void nfs_set_dtsize(struct nfs_readdir_descriptor *desc, unsigned int sz)
|
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(file_inode(desc->file));
|
|
|
|
unsigned int maxsize = server->dtsize;
|
|
|
|
|
|
|
|
if (sz > maxsize)
|
|
|
|
sz = maxsize;
|
|
|
|
if (sz < NFS_MIN_FILE_IO_SIZE)
|
|
|
|
sz = NFS_MIN_FILE_IO_SIZE;
|
|
|
|
desc->dtsize = sz;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs_shrink_dtsize(struct nfs_readdir_descriptor *desc)
|
|
|
|
{
|
|
|
|
nfs_set_dtsize(desc, desc->dtsize >> 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nfs_grow_dtsize(struct nfs_readdir_descriptor *desc)
|
|
|
|
{
|
|
|
|
nfs_set_dtsize(desc, desc->dtsize << 1);
|
|
|
|
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
/*
 * Initialise the nfs_cache_array header at the start of @folio to an
 * empty state that resumes from @last_cookie, tagged with @change_attr.
 */
static void nfs_readdir_folio_init_array(struct folio *folio, u64 last_cookie,
					 u64 change_attr)
{
	struct nfs_cache_array *array;

	array = kmap_local_folio(folio, 0);
	array->change_attr = change_attr;
	array->last_cookie = last_cookie;
	/* Empty array: no entries, not full, not at EOF */
	array->size = 0;
	array->folio_full = 0;
	array->folio_is_eof = 0;
	/* Assume ordered cookies until an append proves otherwise */
	array->cookies_are_ordered = 1;
	kunmap_local(array);
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
/*
|
|
|
|
* we are freeing strings created by nfs_add_to_readdir_array()
|
|
|
|
*/
|
2023-04-05 03:12:52 +08:00
|
|
|
static void nfs_readdir_clear_array(struct folio *folio)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2010-12-02 03:17:06 +08:00
|
|
|
struct nfs_cache_array *array;
|
2022-02-27 07:38:41 +08:00
|
|
|
unsigned int i;
|
2010-11-16 09:26:22 +08:00
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
array = kmap_local_folio(folio, 0);
|
2017-03-11 06:07:46 +08:00
|
|
|
for (i = 0; i < array->size; i++)
|
2020-11-02 08:17:29 +08:00
|
|
|
kfree(array->array[i].name);
|
2022-02-27 07:38:41 +08:00
|
|
|
array->size = 0;
|
fs/nfs: Replace kmap_atomic() with kmap_local_page() in dir.c
kmap_atomic() is deprecated in favor of kmap_local_page().
With kmap_local_page() the mappings are per thread, CPU local, can take
page-faults, and can be called from any context (including interrupts).
Furthermore, the tasks can be preempted and, when they are scheduled to
run again, the kernel virtual addresses are restored and still valid.
kmap_atomic() is implemented like a kmap_local_page() which also disables
page-faults and preemption (the latter only for !PREEMPT_RT kernels,
otherwise it only disables migration).
The code within the mappings/un-mappings in the functions of dir.c don't
depend on the above-mentioned side effects of kmap_atomic(), so that mere
replacements of the old API with the new one is all that is required
(i.e., there is no need to explicitly add calls to pagefault_disable()
and/or preempt_disable()).
Therefore, replace kmap_atomic() with kmap_local_page() in fs/nfs/dir.c.
Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with
HIGHMEM64GB enabled.
Suggested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2023-01-28 05:54:52 +08:00
|
|
|
kunmap_local(array);
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
/*
 * Discard the current contents of @folio's cache array and start it
 * over from @last_cookie with the new @change_attr.
 */
static void nfs_readdir_folio_reinit_array(struct folio *folio, u64 last_cookie,
					   u64 change_attr)
{
	nfs_readdir_clear_array(folio);
	nfs_readdir_folio_init_array(folio, last_cookie, change_attr);
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
static struct folio *
|
|
|
|
nfs_readdir_folio_array_alloc(u64 last_cookie, gfp_t gfp_flags)
|
2020-11-07 09:38:47 +08:00
|
|
|
{
|
2023-04-05 03:12:52 +08:00
|
|
|
struct folio *folio = folio_alloc(gfp_flags, 0);
|
|
|
|
if (folio)
|
|
|
|
nfs_readdir_folio_init_array(folio, last_cookie, 0);
|
|
|
|
return folio;
|
2020-11-07 09:38:47 +08:00
|
|
|
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
/*
 * Release a folio obtained from nfs_readdir_folio_array_alloc(),
 * freeing the cached name strings first. NULL is a no-op.
 */
static void nfs_readdir_folio_array_free(struct folio *folio)
{
	if (!folio)
		return;
	nfs_readdir_clear_array(folio);
	folio_put(folio);
}
|
|
|
|
|
2022-03-22 21:11:44 +08:00
|
|
|
static u64 nfs_readdir_array_index_cookie(struct nfs_cache_array *array)
|
|
|
|
{
|
|
|
|
return array->size == 0 ? array->last_cookie : array->array[0].cookie;
|
|
|
|
}
|
|
|
|
|
2020-11-02 02:45:55 +08:00
|
|
|
static void nfs_readdir_array_set_eof(struct nfs_cache_array *array)
|
|
|
|
{
|
2023-04-05 03:12:52 +08:00
|
|
|
array->folio_is_eof = 1;
|
|
|
|
array->folio_full = 1;
|
2020-11-02 02:45:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool nfs_readdir_array_is_full(struct nfs_cache_array *array)
|
|
|
|
{
|
2023-04-05 03:12:52 +08:00
|
|
|
return array->folio_full;
|
2020-11-02 02:45:55 +08:00
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
/*
|
|
|
|
* the caller is responsible for freeing qstr.name
|
|
|
|
* when called by nfs_readdir_add_to_array, the strings will be freed in
|
|
|
|
* nfs_clear_readdir_array()
|
|
|
|
*/
|
2020-11-02 08:17:29 +08:00
|
|
|
static const char *nfs_readdir_copy_name(const char *name, unsigned int len)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2020-11-02 08:17:29 +08:00
|
|
|
const char *ret = kmemdup_nul(name, len, GFP_KERNEL);
|
|
|
|
|
2010-11-11 20:53:47 +08:00
|
|
|
/*
|
|
|
|
* Avoid a kmemleak false positive. The pointer to the name is stored
|
|
|
|
* in a page cache page which kmemleak does not scan.
|
|
|
|
*/
|
2020-11-02 08:17:29 +08:00
|
|
|
if (ret != NULL)
|
|
|
|
kmemleak_not_leak(ret);
|
|
|
|
return ret;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
2022-02-22 23:39:26 +08:00
|
|
|
static size_t nfs_readdir_array_maxentries(void)
|
|
|
|
{
|
|
|
|
return (PAGE_SIZE - sizeof(struct nfs_cache_array)) /
|
|
|
|
sizeof(struct nfs_cache_array_entry);
|
|
|
|
}
|
|
|
|
|
2020-11-02 02:45:55 +08:00
|
|
|
/*
|
|
|
|
* Check that the next array entry lies entirely within the page bounds
|
|
|
|
*/
|
|
|
|
static int nfs_readdir_array_can_expand(struct nfs_cache_array *array)
|
|
|
|
{
|
2023-04-05 03:12:52 +08:00
|
|
|
if (array->folio_full)
|
2020-11-02 02:45:55 +08:00
|
|
|
return -ENOSPC;
|
2022-02-22 23:39:26 +08:00
|
|
|
if (array->size == nfs_readdir_array_maxentries()) {
|
2023-04-05 03:12:52 +08:00
|
|
|
array->folio_full = 1;
|
2020-11-02 02:45:55 +08:00
|
|
|
return -ENOSPC;
|
|
|
|
}
|
2010-10-24 02:53:23 +08:00
|
|
|
return 0;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
static int nfs_readdir_folio_array_append(struct folio *folio,
|
2023-04-05 00:05:22 +08:00
|
|
|
const struct nfs_entry *entry,
|
|
|
|
u64 *cookie)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2020-11-02 08:17:29 +08:00
|
|
|
struct nfs_cache_array *array;
|
2010-10-24 02:53:23 +08:00
|
|
|
struct nfs_cache_array_entry *cache_entry;
|
2020-11-02 08:17:29 +08:00
|
|
|
const char *name;
|
2022-02-28 01:46:24 +08:00
|
|
|
int ret = -ENOMEM;
|
2010-10-24 02:53:23 +08:00
|
|
|
|
2020-11-02 08:17:29 +08:00
|
|
|
name = nfs_readdir_copy_name(entry->name, entry->len);
|
2010-11-21 04:18:22 +08:00
|
|
|
|
2023-05-04 01:24:11 +08:00
|
|
|
array = kmap_local_folio(folio, 0);
|
2022-02-28 01:46:24 +08:00
|
|
|
if (!name)
|
|
|
|
goto out;
|
2020-11-02 02:45:55 +08:00
|
|
|
ret = nfs_readdir_array_can_expand(array);
|
2020-11-02 08:17:29 +08:00
|
|
|
if (ret) {
|
|
|
|
kfree(name);
|
2010-10-24 02:53:23 +08:00
|
|
|
goto out;
|
2020-11-02 08:17:29 +08:00
|
|
|
}
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2020-11-02 02:45:55 +08:00
|
|
|
cache_entry = &array->array[array->size];
|
2022-02-28 01:46:24 +08:00
|
|
|
cache_entry->cookie = array->last_cookie;
|
2010-10-24 02:53:23 +08:00
|
|
|
cache_entry->ino = entry->ino;
|
2010-11-21 03:26:44 +08:00
|
|
|
cache_entry->d_type = entry->d_type;
|
2020-11-02 08:17:29 +08:00
|
|
|
cache_entry->name_len = entry->len;
|
|
|
|
cache_entry->name = name;
|
2010-09-25 02:48:42 +08:00
|
|
|
array->last_cookie = entry->cookie;
|
2020-11-04 21:32:19 +08:00
|
|
|
if (array->last_cookie <= cache_entry->cookie)
|
|
|
|
array->cookies_are_ordered = 0;
|
2010-11-16 09:26:22 +08:00
|
|
|
array->size++;
|
2010-12-08 01:44:56 +08:00
|
|
|
if (entry->eof != 0)
|
2020-11-02 02:45:55 +08:00
|
|
|
nfs_readdir_array_set_eof(array);
|
2010-10-24 02:53:23 +08:00
|
|
|
out:
|
2022-02-28 01:46:24 +08:00
|
|
|
*cookie = array->last_cookie;
|
2023-05-04 01:24:11 +08:00
|
|
|
kunmap_local(array);
|
2010-10-24 02:53:23 +08:00
|
|
|
return ret;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
2022-02-24 00:31:51 +08:00
|
|
|
#define NFS_READDIR_COOKIE_MASK (U32_MAX >> 14)
|
|
|
|
/*
|
|
|
|
* Hash algorithm allowing content addressible access to sequences
|
|
|
|
* of directory cookies. Content is addressed by the value of the
|
|
|
|
* cookie index of the first readdir entry in a page.
|
|
|
|
*
|
2022-03-31 08:00:07 +08:00
|
|
|
* We select only the first 18 bits to avoid issues with excessive
|
2022-02-24 00:31:51 +08:00
|
|
|
* memory use for the page cache XArray. 18 bits should allow the caching
|
|
|
|
* of 262144 pages of sequences of readdir entries. Since each page holds
|
|
|
|
* 127 readdir entries for a typical 64-bit system, that works out to a
|
|
|
|
* cache of ~ 33 million entries per directory.
|
|
|
|
*/
|
2023-04-05 03:12:52 +08:00
|
|
|
/*
 * Map a directory cookie to a page-cache index. Cookie 0 (the start of
 * the directory) always maps to index 0; everything else is hashed
 * into 18 bits.
 */
static pgoff_t nfs_readdir_folio_cookie_hash(u64 cookie)
{
	if (cookie == 0)
		return 0;
	return hash_64(cookie, 18);
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
/*
 * Check whether the cached array in @folio still matches the expected
 * directory state: same change attribute and same indexing cookie.
 */
static bool nfs_readdir_folio_validate(struct folio *folio, u64 last_cookie,
				       u64 change_attr)
{
	struct nfs_cache_array *array = kmap_local_folio(folio, 0);
	bool valid;

	valid = array->change_attr == change_attr &&
		nfs_readdir_array_index_cookie(array) == last_cookie;
	kunmap_local(array);
	return valid;
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
/* Drop both the lock and the reference taken on a cache folio. */
static void nfs_readdir_folio_unlock_and_put(struct folio *folio)
{
	folio_unlock(folio);
	folio_put(folio);
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
/*
 * Ensure @folio holds a usable cache array for (@cookie, @change_attr).
 * A still-valid uptodate folio is kept as is; otherwise its contents
 * are discarded and a fresh array is initialised.
 */
static void nfs_readdir_folio_init_and_validate(struct folio *folio, u64 cookie,
						u64 change_attr)
{
	if (folio_test_uptodate(folio)) {
		if (nfs_readdir_folio_validate(folio, cookie, change_attr))
			return;
		/* Stale contents: free the cached names before reuse */
		nfs_readdir_clear_array(folio);
	}
	nfs_readdir_folio_init_array(folio, cookie, change_attr);
	folio_mark_uptodate(folio);
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
/*
 * Find or create the locked page-cache folio that caches the run of
 * entries starting at @cookie, and make sure its array is valid for
 * (@cookie, @change_attr). Returns NULL if the folio could not be
 * obtained; otherwise the folio is returned locked with a reference.
 */
static struct folio *nfs_readdir_folio_get_locked(struct address_space *mapping,
						  u64 cookie, u64 change_attr)
{
	struct folio *folio;
	pgoff_t index = nfs_readdir_folio_cookie_hash(cookie);

	folio = filemap_grab_folio(mapping, index);
	if (IS_ERR(folio))
		return NULL;
	nfs_readdir_folio_init_and_validate(folio, cookie, change_attr);
	return folio;
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
static u64 nfs_readdir_folio_last_cookie(struct folio *folio)
|
2020-11-02 01:34:43 +08:00
|
|
|
{
|
|
|
|
struct nfs_cache_array *array;
|
|
|
|
u64 ret;
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
array = kmap_local_folio(folio, 0);
|
2020-11-02 01:34:43 +08:00
|
|
|
ret = array->last_cookie;
|
fs/nfs: Replace kmap_atomic() with kmap_local_page() in dir.c
kmap_atomic() is deprecated in favor of kmap_local_page().
With kmap_local_page() the mappings are per thread, CPU local, can take
page-faults, and can be called from any context (including interrupts).
Furthermore, the tasks can be preempted and, when they are scheduled to
run again, the kernel virtual addresses are restored and still valid.
kmap_atomic() is implemented like a kmap_local_page() which also disables
page-faults and preemption (the latter only for !PREEMPT_RT kernels,
otherwise it only disables migration).
The code within the mappings/un-mappings in the functions of dir.c don't
depend on the above-mentioned side effects of kmap_atomic(), so that mere
replacements of the old API with the new one is all that is required
(i.e., there is no need to explicitly add calls to pagefault_disable()
and/or preempt_disable()).
Therefore, replace kmap_atomic() with kmap_local_page() in fs/nfs/dir.c.
Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with
HIGHMEM64GB enabled.
Suggested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2023-01-28 05:54:52 +08:00
|
|
|
kunmap_local(array);
|
2020-11-02 01:34:43 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
static bool nfs_readdir_folio_needs_filling(struct folio *folio)
|
2020-11-02 01:34:43 +08:00
|
|
|
{
|
|
|
|
struct nfs_cache_array *array;
|
|
|
|
bool ret;
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
array = kmap_local_folio(folio, 0);
|
2020-11-02 01:34:43 +08:00
|
|
|
ret = !nfs_readdir_array_is_full(array);
|
fs/nfs: Replace kmap_atomic() with kmap_local_page() in dir.c
kmap_atomic() is deprecated in favor of kmap_local_page().
With kmap_local_page() the mappings are per thread, CPU local, can take
page-faults, and can be called from any context (including interrupts).
Furthermore, the tasks can be preempted and, when they are scheduled to
run again, the kernel virtual addresses are restored and still valid.
kmap_atomic() is implemented like a kmap_local_page() which also disables
page-faults and preemption (the latter only for !PREEMPT_RT kernels,
otherwise it only disables migration).
The code within the mappings/un-mappings in the functions of dir.c don't
depend on the above-mentioned side effects of kmap_atomic(), so that mere
replacements of the old API with the new one is all that is required
(i.e., there is no need to explicitly add calls to pagefault_disable()
and/or preempt_disable()).
Therefore, replace kmap_atomic() with kmap_local_page() in fs/nfs/dir.c.
Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with
HIGHMEM64GB enabled.
Suggested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2023-01-28 05:54:52 +08:00
|
|
|
kunmap_local(array);
|
2010-10-24 02:53:23 +08:00
|
|
|
return ret;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
static void nfs_readdir_folio_set_eof(struct folio *folio)
|
2020-11-02 02:45:55 +08:00
|
|
|
{
|
|
|
|
struct nfs_cache_array *array;
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
array = kmap_local_folio(folio, 0);
|
2020-11-02 02:45:55 +08:00
|
|
|
nfs_readdir_array_set_eof(array);
|
fs/nfs: Replace kmap_atomic() with kmap_local_page() in dir.c
kmap_atomic() is deprecated in favor of kmap_local_page().
With kmap_local_page() the mappings are per thread, CPU local, can take
page-faults, and can be called from any context (including interrupts).
Furthermore, the tasks can be preempted and, when they are scheduled to
run again, the kernel virtual addresses are restored and still valid.
kmap_atomic() is implemented like a kmap_local_page() which also disables
page-faults and preemption (the latter only for !PREEMPT_RT kernels,
otherwise it only disables migration).
The code within the mappings/un-mappings in the functions of dir.c don't
depend on the above-mentioned side effects of kmap_atomic(), so that mere
replacements of the old API with the new one is all that is required
(i.e., there is no need to explicitly add calls to pagefault_disable()
and/or preempt_disable()).
Therefore, replace kmap_atomic() with kmap_local_page() in fs/nfs/dir.c.
Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with
HIGHMEM64GB enabled.
Suggested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2023-01-28 05:54:52 +08:00
|
|
|
kunmap_local(array);
|
2020-11-02 02:45:55 +08:00
|
|
|
}
|
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
static struct folio *nfs_readdir_folio_get_next(struct address_space *mapping,
|
|
|
|
u64 cookie, u64 change_attr)
|
2020-11-02 02:14:10 +08:00
|
|
|
{
|
2023-04-05 03:12:52 +08:00
|
|
|
pgoff_t index = nfs_readdir_folio_cookie_hash(cookie);
|
|
|
|
struct folio *folio;
|
2020-11-02 02:14:10 +08:00
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
folio = __filemap_get_folio(mapping, index,
|
|
|
|
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
|
|
|
|
mapping_gfp_mask(mapping));
|
2023-05-07 01:16:19 +08:00
|
|
|
if (IS_ERR(folio))
|
2022-02-24 02:29:59 +08:00
|
|
|
return NULL;
|
2023-04-05 03:12:52 +08:00
|
|
|
nfs_readdir_folio_init_and_validate(folio, cookie, change_attr);
|
|
|
|
if (nfs_readdir_folio_last_cookie(folio) != cookie)
|
|
|
|
nfs_readdir_folio_reinit_array(folio, cookie, change_attr);
|
|
|
|
return folio;
|
2020-11-02 02:14:10 +08:00
|
|
|
}
|
|
|
|
|
2020-02-04 03:49:33 +08:00
|
|
|
static inline
|
|
|
|
int is_32bit_api(void)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
return in_compat_syscall();
|
|
|
|
#else
|
|
|
|
return (BITS_PER_LONG == 32);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
static
|
|
|
|
bool nfs_readdir_use_cookie(const struct file *filp)
|
|
|
|
{
|
|
|
|
if ((filp->f_mode & FMODE_32BITHASH) ||
|
|
|
|
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2022-02-26 22:38:19 +08:00
|
|
|
static void nfs_readdir_seek_next_array(struct nfs_cache_array *array,
|
|
|
|
struct nfs_readdir_descriptor *desc)
|
|
|
|
{
|
2023-04-05 03:12:52 +08:00
|
|
|
if (array->folio_full) {
|
2022-02-26 22:38:19 +08:00
|
|
|
desc->last_cookie = array->last_cookie;
|
|
|
|
desc->current_index += array->size;
|
|
|
|
desc->cache_entry_index = 0;
|
2023-04-05 00:05:22 +08:00
|
|
|
desc->folio_index++;
|
2022-02-26 22:38:19 +08:00
|
|
|
} else
|
2022-03-22 21:11:44 +08:00
|
|
|
desc->last_cookie = nfs_readdir_array_index_cookie(array);
|
2022-02-26 22:38:19 +08:00
|
|
|
}
|
|
|
|
|
2022-02-24 00:31:51 +08:00
|
|
|
static void nfs_readdir_rewind_search(struct nfs_readdir_descriptor *desc)
|
|
|
|
{
|
|
|
|
desc->current_index = 0;
|
|
|
|
desc->last_cookie = 0;
|
2023-04-05 00:05:22 +08:00
|
|
|
desc->folio_index = 0;
|
2022-02-24 00:31:51 +08:00
|
|
|
}
|
|
|
|
|
2020-11-03 20:42:04 +08:00
|
|
|
static int nfs_readdir_search_for_pos(struct nfs_cache_array *array,
|
|
|
|
struct nfs_readdir_descriptor *desc)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2013-05-18 04:34:50 +08:00
|
|
|
loff_t diff = desc->ctx->pos - desc->current_index;
|
2010-09-25 02:48:42 +08:00
|
|
|
unsigned int index;
|
|
|
|
|
|
|
|
if (diff < 0)
|
|
|
|
goto out_eof;
|
|
|
|
if (diff >= array->size) {
|
2023-04-05 03:12:52 +08:00
|
|
|
if (array->folio_is_eof)
|
2010-09-25 02:48:42 +08:00
|
|
|
goto out_eof;
|
2022-02-26 22:38:19 +08:00
|
|
|
nfs_readdir_seek_next_array(array, desc);
|
2010-09-25 02:48:42 +08:00
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
index = (unsigned int)diff;
|
2020-11-01 22:56:18 +08:00
|
|
|
desc->dir_cookie = array->array[index].cookie;
|
2010-09-25 02:48:42 +08:00
|
|
|
desc->cache_entry_index = index;
|
|
|
|
return 0;
|
|
|
|
out_eof:
|
2017-10-07 22:02:21 +08:00
|
|
|
desc->eof = true;
|
2010-09-25 02:48:42 +08:00
|
|
|
return -EBADCOOKIE;
|
|
|
|
}
|
|
|
|
|
2020-11-04 21:32:19 +08:00
|
|
|
static bool nfs_readdir_array_cookie_in_range(struct nfs_cache_array *array,
|
|
|
|
u64 cookie)
|
|
|
|
{
|
|
|
|
if (!array->cookies_are_ordered)
|
|
|
|
return true;
|
|
|
|
/* Optimisation for monotonically increasing cookies */
|
|
|
|
if (cookie >= array->last_cookie)
|
|
|
|
return false;
|
|
|
|
if (array->size && cookie < array->array[0].cookie)
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-11-03 20:42:04 +08:00
|
|
|
static int nfs_readdir_search_for_cookie(struct nfs_cache_array *array,
|
|
|
|
struct nfs_readdir_descriptor *desc)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2022-02-24 00:31:51 +08:00
|
|
|
unsigned int i;
|
2010-09-25 02:48:42 +08:00
|
|
|
int status = -EAGAIN;
|
|
|
|
|
2020-11-04 21:32:19 +08:00
|
|
|
if (!nfs_readdir_array_cookie_in_range(array, desc->dir_cookie))
|
|
|
|
goto check_eof;
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
for (i = 0; i < array->size; i++) {
|
2020-11-01 22:56:18 +08:00
|
|
|
if (array->array[i].cookie == desc->dir_cookie) {
|
2020-02-04 03:49:33 +08:00
|
|
|
if (nfs_readdir_use_cookie(desc->file))
|
2020-11-01 22:56:18 +08:00
|
|
|
desc->ctx->pos = desc->dir_cookie;
|
2020-02-04 03:49:33 +08:00
|
|
|
else
|
2022-02-24 00:31:51 +08:00
|
|
|
desc->ctx->pos = desc->current_index + i;
|
2010-09-25 02:48:42 +08:00
|
|
|
desc->cache_entry_index = i;
|
2010-12-08 01:44:56 +08:00
|
|
|
return 0;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
}
|
2020-11-04 21:32:19 +08:00
|
|
|
check_eof:
|
2023-04-05 03:12:52 +08:00
|
|
|
if (array->folio_is_eof) {
|
2010-11-16 09:26:22 +08:00
|
|
|
status = -EBADCOOKIE;
|
2020-11-01 22:56:18 +08:00
|
|
|
if (desc->dir_cookie == array->last_cookie)
|
2017-10-07 22:02:21 +08:00
|
|
|
desc->eof = true;
|
2022-02-26 22:38:19 +08:00
|
|
|
} else
|
|
|
|
nfs_readdir_seek_next_array(array, desc);
|
2010-09-25 02:48:42 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2020-11-03 20:42:04 +08:00
|
|
|
static int nfs_readdir_search_array(struct nfs_readdir_descriptor *desc)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
|
|
|
struct nfs_cache_array *array;
|
2010-12-08 01:44:56 +08:00
|
|
|
int status;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2023-04-05 00:05:22 +08:00
|
|
|
array = kmap_local_folio(desc->folio, 0);
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2020-11-01 22:56:18 +08:00
|
|
|
if (desc->dir_cookie == 0)
|
2010-09-25 02:48:42 +08:00
|
|
|
status = nfs_readdir_search_for_pos(array, desc);
|
|
|
|
else
|
|
|
|
status = nfs_readdir_search_for_cookie(array, desc);
|
|
|
|
|
fs/nfs: Replace kmap_atomic() with kmap_local_page() in dir.c
kmap_atomic() is deprecated in favor of kmap_local_page().
With kmap_local_page() the mappings are per thread, CPU local, can take
page-faults, and can be called from any context (including interrupts).
Furthermore, the tasks can be preempted and, when they are scheduled to
run again, the kernel virtual addresses are restored and still valid.
kmap_atomic() is implemented like a kmap_local_page() which also disables
page-faults and preemption (the latter only for !PREEMPT_RT kernels,
otherwise it only disables migration).
The code within the mappings/un-mappings in the functions of dir.c don't
depend on the above-mentioned side effects of kmap_atomic(), so that mere
replacements of the old API with the new one is all that is required
(i.e., there is no need to explicitly add calls to pagefault_disable()
and/or preempt_disable()).
Therefore, replace kmap_atomic() with kmap_local_page() in fs/nfs/dir.c.
Tested in a QEMU/KVM x86_32 VM, 6GB RAM, booting a kernel with
HIGHMEM64GB enabled.
Suggested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2023-01-28 05:54:52 +08:00
|
|
|
kunmap_local(array);
|
2010-09-25 02:48:42 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fill a page with xdr information before transferring to the cache page */
|
2020-11-02 04:24:41 +08:00
|
|
|
static int nfs_readdir_xdr_filler(struct nfs_readdir_descriptor *desc,
|
2020-11-03 09:06:12 +08:00
|
|
|
__be32 *verf, u64 cookie,
|
|
|
|
struct page **pages, size_t bufsize,
|
|
|
|
__be32 *verf_res)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2020-11-03 06:34:23 +08:00
|
|
|
struct inode *inode = file_inode(desc->file);
|
|
|
|
struct nfs_readdir_arg arg = {
|
|
|
|
.dentry = file_dentry(desc->file),
|
|
|
|
.cred = desc->file->f_cred,
|
2020-11-03 09:06:12 +08:00
|
|
|
.verf = verf,
|
2020-11-03 06:34:23 +08:00
|
|
|
.cookie = cookie,
|
|
|
|
.pages = pages,
|
|
|
|
.page_len = bufsize,
|
|
|
|
.plus = desc->plus,
|
|
|
|
};
|
|
|
|
struct nfs_readdir_res res = {
|
|
|
|
.verf = verf_res,
|
|
|
|
};
|
2008-10-15 07:16:07 +08:00
|
|
|
unsigned long timestamp, gencount;
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
|
|
|
|
|
|
|
again:
|
|
|
|
timestamp = jiffies;
|
2008-10-15 07:16:07 +08:00
|
|
|
gencount = nfs_inc_attr_generation_counter();
|
2020-02-05 22:01:52 +08:00
|
|
|
desc->dir_verifier = nfs_save_change_attribute(inode);
|
2020-11-03 06:34:23 +08:00
|
|
|
error = NFS_PROTO(inode)->readdir(&arg, &res);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (error < 0) {
|
|
|
|
/* We requested READDIRPLUS, but the server doesn't grok it */
|
|
|
|
if (error == -ENOTSUPP && desc->plus) {
|
|
|
|
NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
|
2020-11-03 06:34:23 +08:00
|
|
|
desc->plus = arg.plus = false;
|
2005-04-17 06:20:36 +08:00
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
goto error;
|
|
|
|
}
|
2007-04-16 07:35:27 +08:00
|
|
|
desc->timestamp = timestamp;
|
2008-10-15 07:16:07 +08:00
|
|
|
desc->gencount = gencount;
|
2010-09-25 02:48:42 +08:00
|
|
|
error:
|
|
|
|
return error;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2020-11-03 20:42:04 +08:00
|
|
|
static int xdr_decode(struct nfs_readdir_descriptor *desc,
|
2010-12-14 22:58:11 +08:00
|
|
|
struct nfs_entry *entry, struct xdr_stream *xdr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2020-02-04 03:49:33 +08:00
|
|
|
struct inode *inode = file_inode(desc->file);
|
2010-12-14 22:58:11 +08:00
|
|
|
int error;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-02-04 03:49:33 +08:00
|
|
|
error = NFS_PROTO(inode)->decode_dirent(xdr, entry, desc->plus);
|
2010-12-14 22:58:11 +08:00
|
|
|
if (error)
|
|
|
|
return error;
|
2010-09-25 02:48:42 +08:00
|
|
|
entry->fattr->time_start = desc->timestamp;
|
|
|
|
entry->fattr->gencount = desc->gencount;
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2015-02-24 07:51:32 +08:00
|
|
|
/* Match file and dirent using either filehandle or fileid
|
|
|
|
* Note: caller is responsible for checking the fsid
|
|
|
|
*/
|
2010-09-25 06:50:01 +08:00
|
|
|
static
|
|
|
|
int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
|
|
|
|
{
|
2016-06-18 04:48:27 +08:00
|
|
|
struct inode *inode;
|
2015-02-24 07:51:32 +08:00
|
|
|
struct nfs_inode *nfsi;
|
|
|
|
|
2015-03-18 06:25:59 +08:00
|
|
|
if (d_really_is_negative(dentry))
|
|
|
|
return 0;
|
2015-02-24 07:51:32 +08:00
|
|
|
|
2016-06-18 04:48:27 +08:00
|
|
|
inode = d_inode(dentry);
|
|
|
|
if (is_bad_inode(inode) || NFS_STALE(inode))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nfsi = NFS_I(inode);
|
2016-09-23 01:38:52 +08:00
|
|
|
if (entry->fattr->fileid != nfsi->fileid)
|
|
|
|
return 0;
|
|
|
|
if (entry->fh->size && nfs_compare_fh(entry->fh, &nfsi->fh) != 0)
|
|
|
|
return 0;
|
|
|
|
return 1;
|
2010-09-25 06:50:01 +08:00
|
|
|
}
|
|
|
|
|
2022-02-18 00:08:24 +08:00
|
|
|
#define NFS_READDIR_CACHE_USAGE_THRESHOLD (8UL)
|
|
|
|
|
|
|
|
static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx,
|
|
|
|
unsigned int cache_hits,
|
|
|
|
unsigned int cache_misses)
|
2012-05-02 05:37:59 +08:00
|
|
|
{
|
|
|
|
if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
|
|
|
|
return false;
|
2022-02-18 00:08:24 +08:00
|
|
|
if (ctx->pos == 0 ||
|
|
|
|
cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD)
|
2012-05-02 05:37:59 +08:00
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2022-02-18 00:08:24 +08:00
|
|
|
* This function is called by the getattr code to request the
|
2016-11-20 00:21:54 +08:00
|
|
|
* use of readdirplus to accelerate any future lookups in the same
|
2012-05-02 05:37:59 +08:00
|
|
|
* directory.
|
|
|
|
*/
|
2022-02-18 00:08:24 +08:00
|
|
|
void nfs_readdir_record_entry_cache_hit(struct inode *dir)
|
2012-05-02 05:37:59 +08:00
|
|
|
{
|
2016-11-20 00:21:54 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(dir);
|
2022-02-18 00:08:24 +08:00
|
|
|
struct nfs_open_dir_context *ctx;
|
2016-11-20 00:21:54 +08:00
|
|
|
|
|
|
|
if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
|
2022-02-18 00:08:24 +08:00
|
|
|
S_ISDIR(dir->i_mode)) {
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu (ctx, &nfsi->open_files, list)
|
|
|
|
atomic_inc(&ctx->cache_hits);
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
2012-05-02 05:37:59 +08:00
|
|
|
}
|
|
|
|
|
2014-02-08 06:02:08 +08:00
|
|
|
/*
|
|
|
|
* This function is mainly for use by nfs_getattr().
|
|
|
|
*
|
|
|
|
* If this is an 'ls -l', we want to force use of readdirplus.
|
|
|
|
*/
|
2022-02-18 00:08:24 +08:00
|
|
|
void nfs_readdir_record_entry_cache_miss(struct inode *dir)
|
2014-02-08 06:02:08 +08:00
|
|
|
{
|
2016-11-20 00:21:54 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(dir);
|
2022-02-18 00:08:24 +08:00
|
|
|
struct nfs_open_dir_context *ctx;
|
2016-11-20 00:21:54 +08:00
|
|
|
|
|
|
|
if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
|
2022-02-18 00:08:24 +08:00
|
|
|
S_ISDIR(dir->i_mode)) {
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu (ctx, &nfsi->open_files, list)
|
|
|
|
atomic_inc(&ctx->cache_misses);
|
|
|
|
rcu_read_unlock();
|
2014-02-08 06:02:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-19 23:06:05 +08:00
|
|
|
static void nfs_lookup_advise_force_readdirplus(struct inode *dir,
|
|
|
|
unsigned int flags)
|
2022-02-18 00:08:24 +08:00
|
|
|
{
|
2022-02-19 22:56:45 +08:00
|
|
|
if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
|
|
|
|
return;
|
2022-02-19 23:06:05 +08:00
|
|
|
if (flags & (LOOKUP_EXCL | LOOKUP_PARENT | LOOKUP_REVAL))
|
|
|
|
return;
|
2022-02-18 00:08:24 +08:00
|
|
|
nfs_readdir_record_entry_cache_miss(dir);
|
|
|
|
}
|
|
|
|
|
2010-09-25 06:50:01 +08:00
|
|
|
static
|
2020-02-05 22:01:52 +08:00
|
|
|
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
|
|
|
|
unsigned long dir_verifier)
|
2010-09-25 06:50:01 +08:00
|
|
|
{
|
2012-05-11 04:14:12 +08:00
|
|
|
struct qstr filename = QSTR_INIT(entry->name, entry->len);
|
2016-04-29 07:52:56 +08:00
|
|
|
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
|
2010-10-24 02:53:23 +08:00
|
|
|
struct dentry *dentry;
|
|
|
|
struct dentry *alias;
|
2010-09-25 06:50:01 +08:00
|
|
|
struct inode *inode;
|
2013-05-23 00:50:44 +08:00
|
|
|
int status;
|
2010-09-25 06:50:01 +08:00
|
|
|
|
2015-02-24 07:51:32 +08:00
|
|
|
if (!(entry->fattr->valid & NFS_ATTR_FATTR_FILEID))
|
|
|
|
return;
|
2015-02-23 05:35:36 +08:00
|
|
|
if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID))
|
|
|
|
return;
|
2016-09-21 02:34:24 +08:00
|
|
|
if (filename.len == 0)
|
|
|
|
return;
|
|
|
|
/* Validate that the name doesn't contain any illegal '\0' */
|
|
|
|
if (strnlen(filename.name, filename.len) != filename.len)
|
|
|
|
return;
|
|
|
|
/* ...or '/' */
|
|
|
|
if (strnchr(filename.name, filename.len, '/'))
|
|
|
|
return;
|
2010-10-24 02:53:23 +08:00
|
|
|
if (filename.name[0] == '.') {
|
|
|
|
if (filename.len == 1)
|
|
|
|
return;
|
|
|
|
if (filename.len == 2 && filename.name[1] == '.')
|
|
|
|
return;
|
|
|
|
}
|
2016-06-10 22:51:30 +08:00
|
|
|
filename.hash = full_name_hash(parent, filename.name, filename.len);
|
2010-09-25 06:50:01 +08:00
|
|
|
|
2010-10-24 02:53:23 +08:00
|
|
|
dentry = d_lookup(parent, &filename);
|
2016-04-29 07:52:56 +08:00
|
|
|
again:
|
|
|
|
if (!dentry) {
|
|
|
|
dentry = d_alloc_parallel(parent, &filename, &wq);
|
|
|
|
if (IS_ERR(dentry))
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (!d_in_lookup(dentry)) {
|
2015-02-23 05:35:36 +08:00
|
|
|
/* Is there a mountpoint here? If so, just exit */
|
|
|
|
if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid,
|
|
|
|
&entry->fattr->fsid))
|
|
|
|
goto out;
|
2010-09-25 06:50:01 +08:00
|
|
|
if (nfs_same_file(dentry, entry)) {
|
2016-09-23 01:38:52 +08:00
|
|
|
if (!entry->fh->size)
|
|
|
|
goto out;
|
2020-02-05 22:01:52 +08:00
|
|
|
nfs_set_verifier(dentry, dir_verifier);
|
2015-03-18 06:25:59 +08:00
|
|
|
status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
|
2013-05-23 00:50:44 +08:00
|
|
|
if (!status)
|
2021-10-23 01:11:12 +08:00
|
|
|
nfs_setsecurity(d_inode(dentry), entry->fattr);
|
2022-02-20 08:19:35 +08:00
|
|
|
trace_nfs_readdir_lookup_revalidate(d_inode(parent),
|
|
|
|
dentry, 0, status);
|
2010-09-25 06:50:01 +08:00
|
|
|
goto out;
|
|
|
|
} else {
|
2022-02-20 08:19:35 +08:00
|
|
|
trace_nfs_readdir_lookup_revalidate_failed(
|
|
|
|
d_inode(parent), dentry, 0);
|
2014-02-14 01:46:25 +08:00
|
|
|
d_invalidate(dentry);
|
2010-09-25 06:50:01 +08:00
|
|
|
dput(dentry);
|
2016-04-29 07:52:56 +08:00
|
|
|
dentry = NULL;
|
|
|
|
goto again;
|
2010-09-25 06:50:01 +08:00
|
|
|
}
|
|
|
|
}
|
2016-09-23 01:38:52 +08:00
|
|
|
if (!entry->fh->size) {
|
|
|
|
d_lookup_done(dentry);
|
|
|
|
goto out;
|
|
|
|
}
|
2010-09-25 06:50:01 +08:00
|
|
|
|
2021-10-23 01:11:11 +08:00
|
|
|
inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
|
2014-10-13 10:24:21 +08:00
|
|
|
alias = d_splice_alias(inode, dentry);
|
2016-04-29 07:52:56 +08:00
|
|
|
d_lookup_done(dentry);
|
|
|
|
if (alias) {
|
|
|
|
if (IS_ERR(alias))
|
|
|
|
goto out;
|
|
|
|
dput(dentry);
|
|
|
|
dentry = alias;
|
|
|
|
}
|
2020-02-05 22:01:52 +08:00
|
|
|
nfs_set_verifier(dentry, dir_verifier);
|
2022-02-20 08:19:35 +08:00
|
|
|
trace_nfs_readdir_lookup(d_inode(parent), dentry, 0);
|
2010-09-25 06:50:01 +08:00
|
|
|
out:
|
|
|
|
dput(dentry);
|
|
|
|
}
|
|
|
|
|
2022-02-25 00:48:35 +08:00
|
|
|
static int nfs_readdir_entry_decode(struct nfs_readdir_descriptor *desc,
|
|
|
|
struct nfs_entry *entry,
|
|
|
|
struct xdr_stream *stream)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (entry->fattr->label)
|
|
|
|
entry->fattr->label->len = NFS4_MAXLABELLEN;
|
|
|
|
ret = xdr_decode(desc, entry, stream);
|
|
|
|
if (ret || !desc->plus)
|
|
|
|
return ret;
|
|
|
|
nfs_prime_dcache(file_dentry(desc->file), entry, desc->dir_verifier);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-09-25 02:48:42 +08:00
|
|
|
/* Perform conversion from xdr to cache array */
|
2023-04-05 00:05:22 +08:00
|
|
|
static int nfs_readdir_folio_filler(struct nfs_readdir_descriptor *desc,
|
|
|
|
struct nfs_entry *entry,
|
|
|
|
struct page **xdr_pages, unsigned int buflen,
|
|
|
|
struct folio **arrays, size_t narrays,
|
|
|
|
u64 change_attr)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2020-11-02 02:14:10 +08:00
|
|
|
struct address_space *mapping = desc->file->f_mapping;
|
2023-04-05 03:12:52 +08:00
|
|
|
struct folio *new, *folio = *arrays;
|
2010-10-21 03:44:29 +08:00
|
|
|
struct xdr_stream stream;
|
2023-04-05 03:12:52 +08:00
|
|
|
struct page *scratch;
|
2011-05-20 02:16:47 +08:00
|
|
|
struct xdr_buf buf;
|
2022-02-28 01:46:24 +08:00
|
|
|
u64 cookie;
|
2010-11-21 01:43:45 +08:00
|
|
|
int status;
|
2010-10-21 03:44:29 +08:00
|
|
|
|
2011-01-09 06:45:38 +08:00
|
|
|
scratch = alloc_page(GFP_KERNEL);
|
|
|
|
if (scratch == NULL)
|
|
|
|
return -ENOMEM;
|
2010-10-21 03:44:29 +08:00
|
|
|
|
2011-05-20 02:16:47 +08:00
|
|
|
xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
|
2020-11-12 04:52:47 +08:00
|
|
|
xdr_set_scratch_page(&stream, scratch);
|
2010-10-22 04:33:16 +08:00
|
|
|
|
|
|
|
do {
|
2022-02-25 00:48:35 +08:00
|
|
|
status = nfs_readdir_entry_decode(desc, entry, &stream);
|
2020-11-02 06:15:43 +08:00
|
|
|
if (status != 0)
|
2010-10-22 04:33:16 +08:00
|
|
|
break;
|
2010-11-21 01:43:45 +08:00
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
status = nfs_readdir_folio_array_append(folio, entry, &cookie);
|
2020-11-02 02:14:10 +08:00
|
|
|
if (status != -ENOSPC)
|
|
|
|
continue;
|
|
|
|
|
2023-04-05 00:05:22 +08:00
|
|
|
if (folio->mapping != mapping) {
|
2020-11-07 09:38:47 +08:00
|
|
|
if (!--narrays)
|
|
|
|
break;
|
2023-04-05 03:12:52 +08:00
|
|
|
new = nfs_readdir_folio_array_alloc(cookie, GFP_KERNEL);
|
2020-11-07 09:38:47 +08:00
|
|
|
if (!new)
|
|
|
|
break;
|
|
|
|
arrays++;
|
2023-04-05 03:12:52 +08:00
|
|
|
*arrays = folio = new;
|
2020-11-07 09:38:47 +08:00
|
|
|
} else {
|
2023-04-05 03:12:52 +08:00
|
|
|
new = nfs_readdir_folio_get_next(mapping, cookie,
|
|
|
|
change_attr);
|
2020-11-07 09:38:47 +08:00
|
|
|
if (!new)
|
|
|
|
break;
|
2023-04-05 00:05:22 +08:00
|
|
|
if (folio != *arrays)
|
2023-04-05 03:12:52 +08:00
|
|
|
nfs_readdir_folio_unlock_and_put(folio);
|
|
|
|
folio = new;
|
2020-11-07 09:38:47 +08:00
|
|
|
}
|
2023-04-05 00:05:22 +08:00
|
|
|
desc->folio_index_max++;
|
2023-04-05 03:12:52 +08:00
|
|
|
status = nfs_readdir_folio_array_append(folio, entry, &cookie);
|
2020-11-02 06:15:43 +08:00
|
|
|
} while (!status && !entry->eof);
|
2010-10-22 04:33:16 +08:00
|
|
|
|
2020-11-02 06:15:43 +08:00
|
|
|
switch (status) {
|
|
|
|
case -EBADCOOKIE:
|
2022-02-25 00:48:35 +08:00
|
|
|
if (!entry->eof)
|
|
|
|
break;
|
2023-04-05 03:12:52 +08:00
|
|
|
nfs_readdir_folio_set_eof(folio);
|
2022-02-25 00:48:35 +08:00
|
|
|
fallthrough;
|
2020-11-02 06:15:43 +08:00
|
|
|
case -EAGAIN:
|
2017-05-04 02:52:21 +08:00
|
|
|
status = 0;
|
2020-11-02 06:15:43 +08:00
|
|
|
break;
|
2022-02-25 00:48:35 +08:00
|
|
|
case -ENOSPC:
|
|
|
|
status = 0;
|
|
|
|
if (!desc->plus)
|
|
|
|
break;
|
|
|
|
while (!nfs_readdir_entry_decode(desc, entry, &stream))
|
|
|
|
;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2011-01-09 06:45:38 +08:00
|
|
|
|
2023-04-05 00:05:22 +08:00
|
|
|
if (folio != *arrays)
|
2023-04-05 03:12:52 +08:00
|
|
|
nfs_readdir_folio_unlock_and_put(folio);
|
2020-11-02 02:14:10 +08:00
|
|
|
|
2011-01-09 06:45:38 +08:00
|
|
|
put_page(scratch);
|
2010-11-16 09:26:22 +08:00
|
|
|
return status;
|
2010-10-21 03:44:37 +08:00
|
|
|
}
|
|
|
|
|
2020-11-02 03:26:47 +08:00
|
|
|
static void nfs_readdir_free_pages(struct page **pages, size_t npages)
|
2010-10-21 03:44:37 +08:00
|
|
|
{
|
2020-11-02 03:26:47 +08:00
|
|
|
while (npages--)
|
|
|
|
put_page(pages[npages]);
|
|
|
|
kfree(pages);
|
2010-10-21 03:44:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2019-02-16 08:38:40 +08:00
|
|
|
* nfs_readdir_alloc_pages() will allocate pages that must be freed with a call
|
|
|
|
* to nfs_readdir_free_pages()
|
2010-10-21 03:44:37 +08:00
|
|
|
*/
|
2020-11-02 03:26:47 +08:00
|
|
|
static struct page **nfs_readdir_alloc_pages(size_t npages)
|
2010-10-21 03:44:37 +08:00
|
|
|
{
|
2020-11-02 03:26:47 +08:00
|
|
|
struct page **pages;
|
|
|
|
size_t i;
|
2010-10-21 03:44:37 +08:00
|
|
|
|
2020-11-02 03:26:47 +08:00
|
|
|
pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
|
|
|
|
if (!pages)
|
|
|
|
return NULL;
|
2010-10-21 03:44:37 +08:00
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
struct page *page = alloc_page(GFP_KERNEL);
|
|
|
|
if (page == NULL)
|
|
|
|
goto out_freepages;
|
|
|
|
pages[i] = page;
|
|
|
|
}
|
2020-11-02 03:26:47 +08:00
|
|
|
return pages;
|
2010-10-21 03:44:37 +08:00
|
|
|
|
|
|
|
out_freepages:
|
2015-07-14 02:01:25 +08:00
|
|
|
nfs_readdir_free_pages(pages, i);
|
2020-11-02 03:26:47 +08:00
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2020-11-03 20:42:04 +08:00
|
|
|
static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
|
2020-11-07 09:38:47 +08:00
|
|
|
__be32 *verf_arg, __be32 *verf_res,
|
2023-04-05 00:05:22 +08:00
|
|
|
struct folio **arrays, size_t narrays)
|
2005-06-23 01:16:29 +08:00
|
|
|
{
|
2022-02-24 00:31:51 +08:00
|
|
|
u64 change_attr;
|
2020-11-02 03:26:47 +08:00
|
|
|
struct page **pages;
|
2023-04-05 00:05:22 +08:00
|
|
|
struct folio *folio = *arrays;
|
2020-11-02 21:55:03 +08:00
|
|
|
struct nfs_entry *entry;
|
2020-11-02 03:26:47 +08:00
|
|
|
size_t array_size;
|
2020-11-03 09:06:12 +08:00
|
|
|
struct inode *inode = file_inode(desc->file);
|
2022-02-08 02:37:00 +08:00
|
|
|
unsigned int dtsize = desc->dtsize;
|
2022-02-08 04:07:01 +08:00
|
|
|
unsigned int pglen;
|
2010-11-16 09:26:22 +08:00
|
|
|
int status = -ENOMEM;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2020-11-02 21:55:03 +08:00
|
|
|
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
|
|
|
if (!entry)
|
|
|
|
return -ENOMEM;
|
2023-04-05 03:12:52 +08:00
|
|
|
entry->cookie = nfs_readdir_folio_last_cookie(folio);
|
2020-11-02 21:55:03 +08:00
|
|
|
entry->fh = nfs_alloc_fhandle();
|
2021-10-23 01:11:01 +08:00
|
|
|
entry->fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
|
2020-11-02 21:55:03 +08:00
|
|
|
entry->server = NFS_SERVER(inode);
|
|
|
|
if (entry->fh == NULL || entry->fattr == NULL)
|
2010-09-25 02:48:42 +08:00
|
|
|
goto out;
|
2005-06-23 01:16:29 +08:00
|
|
|
|
2020-11-02 03:26:47 +08:00
|
|
|
array_size = (dtsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
|
|
|
pages = nfs_readdir_alloc_pages(array_size);
|
|
|
|
if (!pages)
|
2021-10-23 01:11:01 +08:00
|
|
|
goto out;
|
2005-06-23 01:16:29 +08:00
|
|
|
|
2022-02-24 00:31:51 +08:00
|
|
|
change_attr = inode_peek_iversion_raw(inode);
|
2022-02-08 04:07:01 +08:00
|
|
|
status = nfs_readdir_xdr_filler(desc, verf_arg, entry->cookie, pages,
|
|
|
|
dtsize, verf_res);
|
|
|
|
if (status < 0)
|
|
|
|
goto free_pages;
|
2021-03-16 18:25:14 +08:00
|
|
|
|
2022-02-08 04:07:01 +08:00
|
|
|
pglen = status;
|
|
|
|
if (pglen != 0)
|
2023-04-05 00:05:22 +08:00
|
|
|
status = nfs_readdir_folio_filler(desc, entry, pages, pglen,
|
|
|
|
arrays, narrays, change_attr);
|
2022-02-08 04:07:01 +08:00
|
|
|
else
|
2023-04-05 03:12:52 +08:00
|
|
|
nfs_readdir_folio_set_eof(folio);
|
2022-02-08 04:07:01 +08:00
|
|
|
desc->buffer_fills++;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2022-02-08 04:07:01 +08:00
|
|
|
free_pages:
|
2015-07-14 02:01:25 +08:00
|
|
|
nfs_readdir_free_pages(pages, array_size);
|
2010-09-25 02:48:42 +08:00
|
|
|
out:
|
2020-11-02 21:55:03 +08:00
|
|
|
nfs_free_fattr(entry->fattr);
|
|
|
|
nfs_free_fhandle(entry->fh);
|
|
|
|
kfree(entry);
|
2005-06-23 01:16:29 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2023-04-05 00:05:22 +08:00
|
|
|
static void nfs_readdir_folio_put(struct nfs_readdir_descriptor *desc)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2023-04-05 00:05:22 +08:00
|
|
|
folio_put(desc->folio);
|
|
|
|
desc->folio = NULL;
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2020-11-02 01:34:43 +08:00
|
|
|
static void
|
2023-04-05 00:05:22 +08:00
|
|
|
nfs_readdir_folio_unlock_and_put_cached(struct nfs_readdir_descriptor *desc)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2023-04-05 00:05:22 +08:00
|
|
|
folio_unlock(desc->folio);
|
|
|
|
nfs_readdir_folio_put(desc);
|
2010-09-25 02:48:42 +08:00
|
|
|
}
|
|
|
|
|
2023-04-05 00:05:22 +08:00
|
|
|
static struct folio *
|
|
|
|
nfs_readdir_folio_get_cached(struct nfs_readdir_descriptor *desc)
|
2010-09-25 02:48:42 +08:00
|
|
|
{
|
2022-02-24 00:31:51 +08:00
|
|
|
struct address_space *mapping = desc->file->f_mapping;
|
|
|
|
u64 change_attr = inode_peek_iversion_raw(mapping->host);
|
2022-02-24 02:29:59 +08:00
|
|
|
u64 cookie = desc->last_cookie;
|
2023-04-05 03:12:52 +08:00
|
|
|
struct folio *folio;
|
2022-02-24 00:31:51 +08:00
|
|
|
|
2023-04-05 03:12:52 +08:00
|
|
|
folio = nfs_readdir_folio_get_locked(mapping, cookie, change_attr);
|
|
|
|
if (!folio)
|
2022-02-24 02:29:59 +08:00
|
|
|
return NULL;
|
2023-04-05 03:12:52 +08:00
|
|
|
if (desc->clear_cache && !nfs_readdir_folio_needs_filling(folio))
|
|
|
|
nfs_readdir_folio_reinit_array(folio, cookie, change_attr);
|
|
|
|
return folio;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Returns 0 if desc->dir_cookie was found on page desc->page_index
 * and locks the page to prevent removal from the page cache.
 *
 * Returns -ENOMEM if no folio could be obtained, -EAGAIN if the server
 * rejected our cookie (the caller should retry after the cache reset
 * performed here), or another negative errno from the READDIR call.
 */
static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
{
	struct inode *inode = file_inode(desc->file);
	struct nfs_inode *nfsi = NFS_I(inode);
	__be32 verf[NFS_DIR_VERIFIER_SIZE];
	int res;

	desc->folio = nfs_readdir_folio_get_cached(desc);
	if (!desc->folio)
		return -ENOMEM;
	if (nfs_readdir_folio_needs_filling(desc->folio)) {
		/* Grow the dtsize if we had to go back for more pages */
		if (desc->folio_index == desc->folio_index_max)
			nfs_grow_dtsize(desc);
		desc->folio_index_max = desc->folio_index;
		trace_nfs_readdir_cache_fill(desc->file, nfsi->cookieverf,
					     desc->last_cookie,
					     desc->folio->index, desc->dtsize);
		/* Populate the folio with entries fetched from the server */
		res = nfs_readdir_xdr_to_array(desc, nfsi->cookieverf, verf,
					       &desc->folio, 1);
		if (res < 0) {
			nfs_readdir_folio_unlock_and_put_cached(desc);
			trace_nfs_readdir_cache_fill_done(inode, res);
			if (res == -EBADCOOKIE || res == -ENOTSYNC) {
				/*
				 * The server no longer recognises our cookie:
				 * throw away the cached pages and restart the
				 * search from the beginning.
				 */
				invalidate_inode_pages2(desc->file->f_mapping);
				nfs_readdir_rewind_search(desc);
				trace_nfs_readdir_invalidate_cache_range(
					inode, 0, MAX_LFS_FILESIZE);
				return -EAGAIN;
			}
			return res;
		}
		/*
		 * Set the cookie verifier if the page cache was empty
		 */
		if (desc->last_cookie == 0 &&
		    memcmp(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf))) {
			memcpy(nfsi->cookieverf, verf,
			       sizeof(nfsi->cookieverf));
			/* Keep page 0 (just filled); drop everything after it */
			invalidate_inode_pages2_range(desc->file->f_mapping, 1,
						      -1);
			trace_nfs_readdir_invalidate_cache_range(
				inode, 1, MAX_LFS_FILESIZE);
		}
		desc->clear_cache = false;
	}
	res = nfs_readdir_search_array(desc);
	if (res == 0)
		return 0;
	/* Cookie not on this folio: release it and report the search result */
	nfs_readdir_folio_unlock_and_put_cached(desc);
	return res;
}
|
|
|
|
|
|
|
|
/* Search for desc->dir_cookie from the beginning of the page cache */
|
2020-11-03 20:42:04 +08:00
|
|
|
static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-11-16 09:26:22 +08:00
|
|
|
int res;
|
2010-09-25 02:48:42 +08:00
|
|
|
|
2010-12-08 01:44:56 +08:00
|
|
|
do {
|
2020-02-03 06:53:54 +08:00
|
|
|
res = find_and_lock_cache_page(desc);
|
2010-12-08 01:44:56 +08:00
|
|
|
} while (res == -EAGAIN);
|
2005-04-17 06:20:36 +08:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2022-09-21 01:00:21 +08:00
|
|
|
#define NFS_READDIR_CACHE_MISS_THRESHOLD (16UL)
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Once we've found the start of the dirent within a page: fill 'er up...
 *
 * Emits cached entries to the VFS via dir_emit() starting at
 * desc->cache_entry_index, advancing desc->dir_cookie/ctx->pos as it
 * goes.  Sets desc->eob when the user buffer is full and desc->eof when
 * the folio marks the end of the directory.
 */
static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
			   const __be32 *verf)
{
	struct file	*file = desc->file;
	struct nfs_cache_array *array;
	unsigned int i;
	bool first_emit = !desc->dir_cookie;

	array = kmap_local_folio(desc->folio, 0);
	for (i = desc->cache_entry_index; i < array->size; i++) {
		struct nfs_cache_array_entry *ent;

		ent = &array->array[i];
		if (!dir_emit(desc->ctx, ent->name, ent->name_len,
		    nfs_compat_user_ino64(ent->ino), ent->d_type)) {
			/* Caller's buffer is full */
			desc->eob = true;
			break;
		}
		memcpy(desc->verf, verf, sizeof(desc->verf));
		/* Advance the cookie to the next entry (or next array) */
		if (i == array->size - 1) {
			desc->dir_cookie = array->last_cookie;
			nfs_readdir_seek_next_array(array, desc);
		} else {
			desc->dir_cookie = array->array[i + 1].cookie;
			desc->last_cookie = array->array[0].cookie;
		}
		if (nfs_readdir_use_cookie(file))
			desc->ctx->pos = desc->dir_cookie;
		else
			desc->ctx->pos++;
		/*
		 * On the first getdents call, stop early so the cache-hit
		 * accounting can kick in before we stream the whole folio.
		 */
		if (first_emit && i > NFS_READDIR_CACHE_MISS_THRESHOLD + 1) {
			desc->eob = true;
			break;
		}
	}
	if (array->folio_is_eof)
		desc->eof = !desc->eob;

	kunmap_local(array);
	dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n",
			(unsigned long long)desc->dir_cookie);
}
|
|
|
|
|
|
|
|
/*
 * If we cannot find a cookie in our cache, we suspect that this is
 * because it points to a deleted file, so we ask the server to return
 * whatever it thinks is the next entry. We then feed this to filldir.
 * If all goes well, we should then be able to find our way round the
 * cache on the next call to readdir_search_pagecache();
 *
 * NOTE: we cannot add the anonymous page to the pagecache because
 *	 the data it contains might not be page aligned. Besides,
 *	 we should already have a complete representation of the
 *	 directory in the page cache by the time we get here.
 */
static int uncached_readdir(struct nfs_readdir_descriptor *desc)
{
	struct folio **arrays;
	size_t i, sz = 512;
	__be32 verf[NFS_DIR_VERIFIER_SIZE];
	int status = -ENOMEM;

	dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %llu\n",
			(unsigned long long)desc->dir_cookie);

	arrays = kcalloc(sz, sizeof(*arrays), GFP_KERNEL);
	if (!arrays)
		goto out;
	/* Seed the first array directly at the cookie we are looking for */
	arrays[0] = nfs_readdir_folio_array_alloc(desc->dir_cookie, GFP_KERNEL);
	if (!arrays[0])
		goto out;

	desc->folio_index = 0;
	desc->cache_entry_index = 0;
	desc->last_cookie = desc->dir_cookie;
	desc->folio_index_max = 0;

	trace_nfs_readdir_uncached(desc->file, desc->verf, desc->last_cookie,
				   -1, desc->dtsize);

	status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz);
	if (status < 0) {
		trace_nfs_readdir_uncached_done(file_inode(desc->file), status);
		goto out_free;
	}

	/* Feed each filled array to the VFS until the buffer is full */
	for (i = 0; !desc->eob && i < sz && arrays[i]; i++) {
		desc->folio = arrays[i];
		nfs_do_filldir(desc, verf);
	}
	desc->folio = NULL;

	/*
	 * Grow the dtsize if we have to go back for more pages,
	 * or shrink it if we're reading too many.
	 */
	if (!desc->eof) {
		if (!desc->eob)
			nfs_grow_dtsize(desc);
		else if (desc->buffer_fills == 1 &&
			 i < (desc->folio_index_max >> 1))
			nfs_shrink_dtsize(desc);
	}
out_free:
	for (i = 0; i < sz && arrays[i]; i++)
		nfs_readdir_folio_array_free(arrays[i]);
out:
	/* Offset-based readers must restart the page-cache search */
	if (!nfs_readdir_use_cookie(desc->file))
		nfs_readdir_rewind_search(desc);
	desc->folio_index_max = -1;
	kfree(arrays);
	dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status);
	return status;
}
|
|
|
|
|
2022-02-24 02:29:59 +08:00
|
|
|
/*
 * Decide whether the readdir page cache should be cleared because the
 * readdirplus heuristics saw too many cache misses.  Never clears in
 * the middle of nothing (pos == 0) or when readdirplus is off.
 */
static bool nfs_readdir_handle_cache_misses(struct inode *inode,
					    struct nfs_readdir_descriptor *desc,
					    unsigned int cache_misses,
					    bool force_clear)
{
	bool over_threshold;

	if (!desc->plus || desc->ctx->pos == 0)
		return false;
	over_threshold = cache_misses > NFS_READDIR_CACHE_MISS_THRESHOLD;
	if (!over_threshold && !force_clear)
		return false;
	trace_nfs_readdir_force_readdirplus(inode);
	return true;
}
|
|
|
|
|
2005-06-23 01:16:29 +08:00
|
|
|
/* The file offset position represents the dirent entry number.  A
   last cookie cache takes care of the common case of reading the
   whole directory.
 */
static int nfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry	*dentry = file_dentry(file);
	struct inode	*inode = d_inode(dentry);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_dir_context *dir_ctx = file->private_data;
	struct nfs_readdir_descriptor *desc;
	unsigned int cache_hits, cache_misses;
	bool force_clear;
	int res;

	dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
			file, (long long)ctx->pos);
	nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);

	/*
	 * ctx->pos points to the dirent entry number.
	 * *desc->dir_cookie has the cookie for the next entry. We have
	 * to either find the entry with the appropriate number or
	 * revalidate the cookie.
	 */
	nfs_revalidate_mapping(inode, file->f_mapping);

	res = -ENOMEM;
	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		goto out;
	desc->file = file;
	desc->ctx = ctx;
	desc->folio_index_max = -1;

	/* Snapshot the per-open-file readdir state under f_lock */
	spin_lock(&file->f_lock);
	desc->dir_cookie = dir_ctx->dir_cookie;
	desc->folio_index = dir_ctx->page_index;
	desc->last_cookie = dir_ctx->last_cookie;
	desc->attr_gencount = dir_ctx->attr_gencount;
	desc->eof = dir_ctx->eof;
	nfs_set_dtsize(desc, dir_ctx->dtsize);
	memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf));
	cache_hits = atomic_xchg(&dir_ctx->cache_hits, 0);
	cache_misses = atomic_xchg(&dir_ctx->cache_misses, 0);
	force_clear = dir_ctx->force_clear;
	spin_unlock(&file->f_lock);

	if (desc->eof) {
		res = 0;
		goto out_free;
	}

	desc->plus = nfs_use_readdirplus(inode, ctx, cache_hits, cache_misses);
	force_clear = nfs_readdir_handle_cache_misses(inode, desc, cache_misses,
						      force_clear);
	desc->clear_cache = force_clear;

	do {
		res = readdir_search_pagecache(desc);

		if (res == -EBADCOOKIE) {
			res = 0;
			/* This means either end of directory */
			if (desc->dir_cookie && !desc->eof) {
				/* Or that the server has 'lost' a cookie */
				res = uncached_readdir(desc);
				if (res == 0)
					continue;
				if (res == -EBADCOOKIE || res == -ENOTSYNC)
					res = 0;
			}
			break;
		}
		if (res == -ETOOSMALL && desc->plus) {
			/* Server can't do readdirplus: retry plain readdir */
			nfs_zap_caches(inode);
			desc->plus = false;
			desc->eof = false;
			continue;
		}
		if (res < 0)
			break;

		nfs_do_filldir(desc, nfsi->cookieverf);
		nfs_readdir_folio_unlock_and_put_cached(desc);
		if (desc->folio_index == desc->folio_index_max)
			desc->clear_cache = force_clear;
	} while (!desc->eob && !desc->eof);

	/* Write the updated readdir state back for the next call */
	spin_lock(&file->f_lock);
	dir_ctx->dir_cookie = desc->dir_cookie;
	dir_ctx->last_cookie = desc->last_cookie;
	dir_ctx->attr_gencount = desc->attr_gencount;
	dir_ctx->page_index = desc->folio_index;
	dir_ctx->force_clear = force_clear;
	dir_ctx->eof = desc->eof;
	dir_ctx->dtsize = desc->dtsize;
	memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf));
	spin_unlock(&file->f_lock);
out_free:
	kfree(desc);

out:
	dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
	return res;
}
|
|
|
|
|
2012-12-18 07:59:39 +08:00
|
|
|
/*
 * llseek on an NFS directory.  Only SEEK_SET and SEEK_CUR are allowed;
 * a position change resets the cached readdir state so the next
 * nfs_readdir() call re-searches from the new offset/cookie.
 * Note: f_lock is taken inside the switch, on the paths that fall
 * through to the state update below.
 */
static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
{
	struct nfs_open_dir_context *dir_ctx = filp->private_data;

	dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
			filp, offset, whence);

	switch (whence) {
	default:
		return -EINVAL;
	case SEEK_SET:
		if (offset < 0)
			return -EINVAL;
		spin_lock(&filp->f_lock);
		break;
	case SEEK_CUR:
		if (offset == 0)
			return filp->f_pos;
		spin_lock(&filp->f_lock);
		offset += filp->f_pos;
		if (offset < 0) {
			spin_unlock(&filp->f_lock);
			return -EINVAL;
		}
	}
	if (offset != filp->f_pos) {
		filp->f_pos = offset;
		dir_ctx->page_index = 0;
		if (!nfs_readdir_use_cookie(filp)) {
			/* Offset mode: restart the search from the top */
			dir_ctx->dir_cookie = 0;
			dir_ctx->last_cookie = 0;
		} else {
			/* Cookie mode: the offset is the server cookie */
			dir_ctx->dir_cookie = offset;
			dir_ctx->last_cookie = offset;
		}
		dir_ctx->eof = false;
	}
	spin_unlock(&filp->f_lock);
	return offset;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* All directory operations under NFS are synchronous, so fsync()
|
|
|
|
* is a dummy operation.
|
|
|
|
*/
|
2011-07-17 08:44:56 +08:00
|
|
|
static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
|
|
|
|
int datasync)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync);
|
2006-03-21 02:44:24 +08:00
|
|
|
|
2020-10-31 05:57:30 +08:00
|
|
|
nfs_inc_stats(file_inode(filp), NFSIOS_VFSFSYNC);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2007-10-16 06:18:29 +08:00
|
|
|
/**
 * nfs_force_lookup_revalidate - Mark the directory as having changed
 * @dir: pointer to directory inode
 *
 * This forces the revalidation code in nfs_lookup_revalidate() to do a
 * full lookup on all child dentries of 'dir' whenever a change occurs
 * on the server that might have invalidated our dcache.
 *
 * Note that we reserve bit '0' as a tag to let us know when a dentry
 * was revalidated while holding a delegation on its inode.
 *
 * The caller should be holding dir->i_lock
 */
void nfs_force_lookup_revalidate(struct inode *dir)
{
	/* Step by 2 so that tag bit 0 is never disturbed */
	NFS_I(dir)->cache_change_attribute += 2;
}
EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate);
|
2007-10-16 06:18:29 +08:00
|
|
|
|
2020-02-05 22:01:54 +08:00
|
|
|
/**
 * nfs_verify_change_attribute - Detects NFS remote directory changes
 * @dir: pointer to parent directory inode
 * @verf: previously saved change attribute
 *
 * Return "false" if the verifier doesn't match the change attribute.
 * This would usually indicate that the directory contents have changed on
 * the server, and that any dentries need revalidating.
 */
static bool nfs_verify_change_attribute(struct inode *dir, unsigned long verf)
{
	/* Bit 0 is the delegation tag, not part of the attribute itself */
	unsigned long saved = verf & ~1UL;

	return saved == nfs_save_change_attribute(dir);
}
|
|
|
|
|
|
|
|
/* Tag a directory verifier (bit 0) as validated under a delegation. */
static void nfs_set_verifier_delegated(unsigned long *verf)
{
	*verf = *verf | 1UL;
}
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_NFS_V4)
/* Clear the delegation tag (bit 0) from a directory verifier. */
static void nfs_unset_verifier_delegated(unsigned long *verf)
{
	*verf &= ~1UL;
}
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
|
|
|
|
|
|
|
|
/* Does this directory verifier carry the delegation tag (bit 0)? */
static bool nfs_test_verifier_delegated(unsigned long verf)
{
	return (verf & 1UL) != 0;
}
|
|
|
|
|
|
|
|
static bool nfs_verifier_is_delegated(struct dentry *dentry)
|
|
|
|
{
|
|
|
|
return nfs_test_verifier_delegated(dentry->d_time);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Store @verf as @dentry's parent-directory verifier, tagging it if we
 * hold a read delegation on the inode.  Caller holds dentry->d_lock.
 */
static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
{
	struct inode *inode = d_inode(dentry);
	struct inode *dir = d_inode(dentry->d_parent);

	/* Only save a verifier that matches the parent's current state */
	if (!nfs_verify_change_attribute(dir, verf))
		return;
	if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
		nfs_set_verifier_delegated(&verf);
	dentry->d_time = verf;
}
|
|
|
|
|
|
|
|
/**
 * nfs_set_verifier - save a parent directory verifier in the dentry
 * @dentry: pointer to dentry
 * @verf: verifier to save
 *
 * Saves the parent directory verifier in @dentry. If the inode has
 * a delegation, we also tag the dentry as having been revalidated
 * while holding a delegation so that we know we don't have to
 * look it up again after a directory change.
 */
void nfs_set_verifier(struct dentry *dentry, unsigned long verf)
{
	/* d_lock protects d_time against concurrent revalidation */
	spin_lock(&dentry->d_lock);
	nfs_set_verifier_locked(dentry, verf);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(nfs_set_verifier);
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_NFS_V4)
/**
 * nfs_clear_verifier_delegated - clear the dir verifier delegation tag
 * @inode: pointer to inode
 *
 * Iterates through the dentries in the inode alias list and clears
 * the tag used to indicate that the dentry has been revalidated
 * while holding a delegation.
 * This function is intended for use when the delegation is being
 * returned or revoked.
 */
void nfs_clear_verifier_delegated(struct inode *inode)
{
	struct dentry *alias;

	if (!inode)
		return;
	/* i_lock stabilises the alias list; d_lock protects d_time */
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		nfs_unset_verifier_delegated(&alias->d_time);
		spin_unlock(&alias->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
|
|
|
|
|
2021-12-18 04:36:56 +08:00
|
|
|
static int nfs_dentry_verify_change(struct inode *dir, struct dentry *dentry)
|
|
|
|
{
|
|
|
|
if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE) &&
|
|
|
|
d_really_is_negative(dentry))
|
|
|
|
return dentry->d_time == inode_peek_iversion_raw(dir);
|
|
|
|
return nfs_verify_change_attribute(dir, dentry->d_time);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * A check for whether or not the parent directory has changed.
 * In the case it has, we assume that the dentries are untrustworthy
 * and may need to be looked up again.
 * If rcu_walk prevents us from performing a full check, return 0.
 */
static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
			      int rcu_walk)
{
	if (IS_ROOT(dentry))
		return 1;
	if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
		return 0;
	if (!nfs_dentry_verify_change(dir, dentry))
		return 0;
	/* Revalidate nfsi->cache_change_attribute before we declare a match */
	if (nfs_mapping_need_revalidate_inode(dir)) {
		/* Cannot go to the server in RCU-walk mode */
		if (rcu_walk)
			return 0;
		if (__nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
			return 0;
	}
	/* Re-check: the revalidation may have updated the change attribute */
	if (!nfs_dentry_verify_change(dir, dentry))
		return 0;
	return 1;
}
|
|
|
|
|
2007-10-03 07:13:04 +08:00
|
|
|
/*
|
|
|
|
* Use intent information to check whether or not we're going to do
|
|
|
|
* an O_EXCL create using this path component.
|
|
|
|
*/
|
2012-06-11 03:36:40 +08:00
|
|
|
static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags)
|
2007-10-03 07:13:04 +08:00
|
|
|
{
|
|
|
|
if (NFS_PROTO(dir)->version == 2)
|
|
|
|
return 0;
|
2012-06-11 03:36:40 +08:00
|
|
|
return flags & LOOKUP_EXCL;
|
2007-10-03 07:13:04 +08:00
|
|
|
}
|
|
|
|
|
2005-06-08 06:37:01 +08:00
|
|
|
/*
 * Inode and filehandle revalidation for lookups.
 *
 * We force revalidation in the cases where the VFS sets LOOKUP_REVAL,
 * or if the intent information indicates that we're about to open this
 * particular file and the "nocto" mount flag is not set.
 *
 * Returns 0 when the inode may be trusted, -ECHILD when an on-the-wire
 * revalidation is needed but we are in RCU-walk mode, -ESTALE when the
 * inode has been deleted, or another errno from revalidation.
 */
static
int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	if (IS_AUTOMOUNT(inode))
		return 0;

	if (flags & LOOKUP_OPEN) {
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
			/* A NFSv4 OPEN will revalidate later */
			if (server->caps & NFS_CAP_ATOMIC_OPEN)
				goto out;
			fallthrough;
		case S_IFDIR:
			if (server->flags & NFS_MOUNT_NOCTO)
				break;
			/* NFS close-to-open cache consistency validation */
			goto out_force;
		}
	}

	/* VFS wants an on-the-wire revalidation */
	if (flags & LOOKUP_REVAL)
		goto out_force;
out:
	/* Unlinked-but-open files may be preserved as "silly renamed" */
	if (inode->i_nlink > 0 ||
	    (inode->i_nlink == 0 &&
	     test_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(inode)->flags)))
		return 0;
	else
		return -ESTALE;
out_force:
	if (flags & LOOKUP_RCU)
		return -ECHILD;
	ret = __nfs_revalidate_inode(server, inode);
	if (ret != 0)
		return ret;
	goto out;
}
|
|
|
|
|
2021-03-09 03:42:51 +08:00
|
|
|
static void nfs_mark_dir_for_revalidate(struct inode *inode)
|
|
|
|
{
|
|
|
|
spin_lock(&inode->i_lock);
|
2021-09-28 23:24:57 +08:00
|
|
|
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE);
|
2021-03-09 03:42:51 +08:00
|
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * We judge how long we want to trust negative
 * dentries by looking at the parent inode mtime.
 *
 * If parent mtime has changed, we revalidate, else we wait for a
 * period corresponding to the parent's attribute cache timeout value.
 *
 * If LOOKUP_RCU prevents us from performing a full check, return 1
 * suggesting a reval is needed.
 *
 * Note that when creating a new file, or looking up a rename target,
 * then it shouldn't be necessary to revalidate a negative dentry.
 */
static inline
int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
		       unsigned int flags)
{
	if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
		return 0;
	if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG)
		return 1;
	/* Case insensitive server? Revalidate negative dentries */
	if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
		return 1;
	return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
}
|
|
|
|
|
2018-09-28 21:04:05 +08:00
|
|
|
/*
 * Common exit path for lookup revalidation: maps the raw result onto
 * the value returned to the VFS and emits the tracepoint.
 * @error == 1 means valid, 0 means invalid (drop), negative is an errno.
 */
static int
nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
			   struct inode *inode, int error)
{
	switch (error) {
	case 1:
		break;
	case 0:
		/*
		 * We can't d_drop the root of a disconnected tree:
		 * its d_hash is on the s_anon list and d_drop() would hide
		 * it from shrink_dcache_for_unmount(), leading to busy
		 * inodes on unmount and further oopses.
		 */
		if (inode && IS_ROOT(dentry))
			error = 1;
		break;
	}
	trace_nfs_lookup_revalidate_exit(dir, dentry, 0, error);
	return error;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
int ret = 1;
|
|
|
|
if (nfs_neg_need_reval(dir, dentry, flags)) {
|
|
|
|
if (flags & LOOKUP_RCU)
|
|
|
|
return -ECHILD;
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * The dentry was revalidated while we held a delegation, so it is still
 * trusted: refresh its parent verifier and report success without
 * contacting the server.
 */
static int
nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
				struct inode *inode)
{
	unsigned long change_attr = nfs_save_change_attribute(dir);

	nfs_set_verifier(dentry, change_attr);
	return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
}
|
|
|
|
|
2022-02-19 23:06:05 +08:00
|
|
|
/*
 * Perform an over-the-wire LOOKUP to revalidate a positive dentry.
 * Returns 1 when the dentry is still valid, 0 when it must be dropped,
 * or a negative errno.
 */
static int nfs_lookup_revalidate_dentry(struct inode *dir,
					struct dentry *dentry,
					struct inode *inode, unsigned int flags)
{
	struct nfs_fh *fhandle;
	struct nfs_fattr *fattr;
	unsigned long dir_verifier;
	int ret;

	trace_nfs_lookup_revalidate_enter(dir, dentry, flags);

	ret = -ENOMEM;
	fhandle = nfs_alloc_fhandle();
	fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
	if (fhandle == NULL || fattr == NULL)
		goto out;

	/* Sample the verifier before the RPC so we can't miss a change */
	dir_verifier = nfs_save_change_attribute(dir);
	ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
	if (ret < 0) {
		switch (ret) {
		case -ESTALE:
		case -ENOENT:
			/* File is gone on the server: drop the dentry */
			ret = 0;
			break;
		case -ETIMEDOUT:
			/* softreval: trust the cache when the server is slow */
			if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
				ret = 1;
		}
		goto out;
	}

	/* Request help from readdirplus */
	nfs_lookup_advise_force_readdirplus(dir, flags);

	ret = 0;
	if (nfs_compare_fh(NFS_FH(inode), fhandle))
		goto out;
	if (nfs_refresh_inode(inode, fattr) < 0)
		goto out;

	nfs_setsecurity(inode, fattr);
	nfs_set_verifier(dentry, dir_verifier);

	ret = 1;
out:
	nfs_free_fattr(fattr);
	nfs_free_fhandle(fhandle);

	/*
	 * If the lookup failed despite the dentry change attribute being
	 * a match, then we should revalidate the directory cache.
	 */
	if (!ret && nfs_dentry_verify_change(dir, dentry))
		nfs_mark_dir_for_revalidate(dir);
	return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * This is called every time the dcache has a lookup hit,
 * and we should check whether we can really trust that
 * lookup.
 *
 * NOTE! The hit can be a negative hit too, don't assume
 * we have an inode!
 *
 * If the parent directory is seen to have changed, we throw out the
 * cached dentry and do a new lookup.
 *
 * Returns 1 (valid), 0 (invalid), or -ECHILD when the check cannot be
 * completed under RCU-walk and must be retried in ref-walk mode.
 */
static int
nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
			 unsigned int flags)
{
	struct inode *inode;
	int error;

	nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
	inode = d_inode(dentry);

	/* Negative dentry: validity depends only on the parent's verifier. */
	if (!inode)
		return nfs_lookup_revalidate_negative(dir, dentry, flags);

	if (is_bad_inode(inode)) {
		dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
				__func__, dentry);
		goto out_bad;
	}

	/*
	 * On a case-insensitive server, force a fresh lookup of a rename
	 * target that nobody else holds a reference to, so that a
	 * differently-cased alias cannot be silently reused.
	 */
	if ((flags & LOOKUP_RENAME_TARGET) && d_count(dentry) < 2 &&
	    nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
		goto out_bad;

	/* A delegation lets us trust the cached dentry without asking the server. */
	if (nfs_verifier_is_delegated(dentry))
		return nfs_lookup_revalidate_delegated(dir, dentry, inode);

	/* Force a full look up iff the parent directory has changed */
	if (!(flags & (LOOKUP_EXCL | LOOKUP_REVAL)) &&
	    nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
		error = nfs_lookup_verify_inode(inode, flags);
		if (error) {
			if (error == -ESTALE)
				nfs_mark_dir_for_revalidate(dir);
			goto out_bad;
		}
		goto out_valid;
	}

	/* The remaining checks require an over-the-wire call: not RCU-safe. */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	if (NFS_STALE(inode))
		goto out_bad;

	return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags);
out_valid:
	return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
out_bad:
	if (flags & LOOKUP_RCU)
		return -ECHILD;
	return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
}
|
2013-05-23 00:50:43 +08:00
|
|
|
|
2018-09-28 21:04:05 +08:00
|
|
|
/*
 * Common wrapper for the v2/v3 and v4 ->d_revalidate implementations:
 * pins down the parent directory (or verifies its stability under
 * RCU-walk) and then calls @reval with the parent inode.
 */
static int
__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
			int (*reval)(struct inode *, struct dentry *, unsigned int))
{
	struct dentry *parent;
	struct inode *dir;
	int ret;

	if (flags & LOOKUP_RCU) {
		/* An in-progress unlink blocks revalidation; drop out of RCU-walk. */
		if (dentry->d_fsdata == NFS_FSDATA_BLOCKED)
			return -ECHILD;
		parent = READ_ONCE(dentry->d_parent);
		dir = d_inode_rcu(parent);
		if (!dir)
			return -ECHILD;
		ret = reval(dir, dentry, flags);
		/* Parent changed under us: the result is unusable. */
		if (parent != READ_ONCE(dentry->d_parent))
			return -ECHILD;
	} else {
		/* Wait for unlink to complete */
		wait_var_event(&dentry->d_fsdata,
			       dentry->d_fsdata != NFS_FSDATA_BLOCKED);
		parent = dget_parent(dentry);
		ret = reval(d_inode(parent), dentry, flags);
		dput(parent);
	}
	return ret;
}
|
|
|
|
|
2018-09-29 00:42:51 +08:00
|
|
|
/* ->d_revalidate for NFSv2/v3: delegate to the common revalidation logic. */
static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
	return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
}
|
|
|
|
|
2013-02-21 00:19:05 +08:00
|
|
|
/*
|
2015-03-18 06:25:59 +08:00
|
|
|
* A weaker form of d_revalidate for revalidating just the d_inode(dentry)
|
2013-02-21 00:19:05 +08:00
|
|
|
* when we don't really care about the dentry name. This is called when a
|
|
|
|
* pathwalk ends on a dentry that was not found via a normal lookup in the
|
|
|
|
* parent dir (e.g.: ".", "..", procfs symlinks or mountpoint traversals).
|
|
|
|
*
|
|
|
|
* In this situation, we just want to verify that the inode itself is OK
|
|
|
|
* since the dentry might have changed on the server.
|
|
|
|
*/
|
|
|
|
static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
|
|
|
|
{
|
2015-03-18 06:25:59 +08:00
|
|
|
struct inode *inode = d_inode(dentry);
|
2016-12-17 07:04:47 +08:00
|
|
|
int error = 0;
|
2013-02-21 00:19:05 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* I believe we can only get a negative dentry here in the case of a
|
|
|
|
* procfs-style symlink. Just assume it's correct for now, but we may
|
|
|
|
* eventually need to do something more here.
|
|
|
|
*/
|
|
|
|
if (!inode) {
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(LOOKUPCACHE, "%s: %pd2 has negative inode\n",
|
|
|
|
__func__, dentry);
|
2013-02-21 00:19:05 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_bad_inode(inode)) {
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
|
|
|
|
__func__, dentry);
|
2013-02-21 00:19:05 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-25 15:34:41 +08:00
|
|
|
error = nfs_lookup_verify_inode(inode, flags);
|
2013-02-21 00:19:05 +08:00
|
|
|
dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
|
|
|
|
__func__, inode->i_ino, error ? "invalid" : "valid");
|
|
|
|
return !error;
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* This is called from dput() when d_count is going to 0.
|
|
|
|
*/
|
2011-01-07 14:49:23 +08:00
|
|
|
static int nfs_dentry_delete(const struct dentry *dentry)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-09-16 22:53:17 +08:00
|
|
|
dfprintk(VFS, "NFS: dentry_delete(%pd2, %x)\n",
|
|
|
|
dentry, dentry->d_flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-29 08:43:19 +08:00
|
|
|
/* Unhash any dentry with a stale inode */
|
2015-03-18 06:25:59 +08:00
|
|
|
if (d_really_is_positive(dentry) && NFS_STALE(d_inode(dentry)))
|
2008-01-29 08:43:19 +08:00
|
|
|
return 1;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
|
|
|
|
/* Unhash it, so that ->d_iput() would be called */
|
|
|
|
return 1;
|
|
|
|
}
|
2017-11-28 05:05:09 +08:00
|
|
|
if (!(dentry->d_sb->s_flags & SB_ACTIVE)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Unhash it, so that ancestors of killed async unlink
|
|
|
|
* files will be cleaned up during umount */
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2012-12-15 05:38:46 +08:00
|
|
|
/* Ensure that we revalidate inode->i_nlink */
static void nfs_drop_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	/* drop the inode if we're reasonably sure this is the last link */
	if (inode->i_nlink > 0)
		drop_nlink(inode);
	/* Bump the generation count and force re-fetch of link-related attrs. */
	NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter();
	nfs_set_cache_invalid(
		inode, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
		       NFS_INO_INVALID_NLINK);
	spin_unlock(&inode->i_lock);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Called when the dentry loses inode.
 * We use it to clean up silly-renamed files.
 */
static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
{
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		/* Finish the deferred unlink and account for the lost link. */
		nfs_complete_unlink(dentry, inode);
		nfs_drop_nlink(inode);
	}
	iput(inode);
}
|
|
|
|
|
2011-03-16 17:44:14 +08:00
|
|
|
static void nfs_d_release(struct dentry *dentry)
|
|
|
|
{
|
|
|
|
/* free cached devname value, if it survived that far */
|
|
|
|
if (unlikely(dentry->d_fsdata)) {
|
|
|
|
if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
|
|
|
|
WARN_ON(1);
|
|
|
|
else
|
|
|
|
kfree(dentry->d_fsdata);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-02-20 13:51:22 +08:00
|
|
|
/* Dentry operations used by NFSv2/v3 superblocks. */
const struct dentry_operations nfs_dentry_operations = {
	.d_revalidate	= nfs_lookup_revalidate,
	.d_weak_revalidate	= nfs_weak_revalidate,
	.d_delete	= nfs_dentry_delete,
	.d_iput		= nfs_dentry_iput,
	.d_automount	= nfs_d_automount,
	.d_release	= nfs_d_release,
};
EXPORT_SYMBOL_GPL(nfs_dentry_operations);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-07-17 04:39:10 +08:00
|
|
|
/*
 * ->lookup for NFS directories: perform an over-the-wire LOOKUP, attach
 * the resulting inode (if any) to @dentry, and set the dentry's cache
 * verifier. Returns NULL on success with @dentry used as-is, a new
 * dentry from d_splice_alias(), or an ERR_PTR on failure.
 */
struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
	struct dentry *res;
	struct inode *inode = NULL;
	struct nfs_fh *fhandle = NULL;
	struct nfs_fattr *fattr = NULL;
	unsigned long dir_verifier;
	int error;

	dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
	nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);

	if (unlikely(dentry->d_name.len > NFS_SERVER(dir)->namelen))
		return ERR_PTR(-ENAMETOOLONG);

	/*
	 * If we're doing an exclusive create, optimize away the lookup
	 * but don't hash the dentry.
	 */
	if (nfs_is_exclusive_create(dir, flags) || flags & LOOKUP_RENAME_TARGET)
		return NULL;

	res = ERR_PTR(-ENOMEM);
	fhandle = nfs_alloc_fhandle();
	fattr = nfs_alloc_fattr_with_label(NFS_SERVER(dir));
	if (fhandle == NULL || fattr == NULL)
		goto out;

	/* Snapshot the directory verifier before going over the wire. */
	dir_verifier = nfs_save_change_attribute(dir);
	trace_nfs_lookup_enter(dir, dentry, flags);
	error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
	if (error == -ENOENT) {
		/*
		 * Case-insensitive server: use the raw change attribute so a
		 * later differently-cased create invalidates this negative
		 * dentry.
		 */
		if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
			dir_verifier = inode_peek_iversion_raw(dir);
		goto no_entry;
	}
	if (error < 0) {
		res = ERR_PTR(error);
		goto out;
	}
	inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
	res = ERR_CAST(inode);
	if (IS_ERR(res))
		goto out;

	/* Notify readdir to use READDIRPLUS */
	nfs_lookup_advise_force_readdirplus(dir, flags);

no_entry:
	res = d_splice_alias(inode, dentry);
	if (res != NULL) {
		if (IS_ERR(res))
			goto out;
		/* d_splice_alias() returned a preexisting alias: use it. */
		dentry = res;
	}
	nfs_set_verifier(dentry, dir_verifier);
out:
	trace_nfs_lookup_exit(dir, dentry, flags, PTR_ERR_OR_ZERO(res));
	nfs_free_fattr(fattr);
	nfs_free_fhandle(fhandle);
	return res;
}
EXPORT_SYMBOL_GPL(nfs_lookup);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2021-12-18 04:36:57 +08:00
|
|
|
/*
 * Drop all unused dentry aliases of @inode on case-insensitive servers,
 * forcing fresh lookups so stale differently-cased names disappear.
 */
void nfs_d_prune_case_insensitive_aliases(struct inode *inode)
{
	/* Case insensitive server? Revalidate dentries */
	if (inode && nfs_server_capable(inode, NFS_CAP_CASE_INSENSITIVE))
		d_prune_aliases(inode);
}
EXPORT_SYMBOL_GPL(nfs_d_prune_case_insensitive_aliases);
|
|
|
|
|
2012-07-31 04:05:25 +08:00
|
|
|
#if IS_ENABLED(CONFIG_NFS_V4)
|
2012-06-11 04:03:43 +08:00
|
|
|
static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-02-20 13:51:22 +08:00
|
|
|
/* Dentry operations used by NFSv4 superblocks. */
const struct dentry_operations nfs4_dentry_operations = {
	.d_revalidate	= nfs4_lookup_revalidate,
	.d_weak_revalidate	= nfs_weak_revalidate,
	.d_delete	= nfs_dentry_delete,
	.d_iput		= nfs_dentry_iput,
	.d_automount	= nfs_d_automount,
	.d_release	= nfs_d_release,
};
EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-10-13 12:26:47 +08:00
|
|
|
/* Allocate an open context for @dentry, translating open flags to an fmode. */
static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
{
	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
}
|
|
|
|
|
|
|
|
/* finish_open() callback: hook the file up to fscache; never fails. */
static int do_open(struct inode *inode, struct file *filp)
{
	nfs_fscache_open_file(inode, filp);
	return 0;
}
|
|
|
|
|
2012-06-22 16:39:14 +08:00
|
|
|
/*
 * Complete an atomic open: install @dentry in @file and attach the open
 * context. Non-regular files yield -EOPENSTALE so the VFS falls back to
 * the normal open path.
 */
static int nfs_finish_open(struct nfs_open_context *ctx,
			   struct dentry *dentry,
			   struct file *file, unsigned open_flags)
{
	int err;

	err = finish_open(file, dentry, do_open);
	if (err)
		goto out;
	if (S_ISREG(file_inode(file)->i_mode))
		nfs_file_set_open_context(file, ctx);
	else
		err = -EOPENSTALE;
out:
	return err;
}
|
|
|
|
|
2012-07-17 04:39:12 +08:00
|
|
|
/*
 * ->atomic_open for NFSv4: try to open (and possibly create) the file in
 * a single OPEN RPC. Falls back to nfs_lookup() + finish_no_open() for
 * cases the OPEN call cannot handle (directories, symlink loops, etc.).
 */
int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
		    struct file *file, unsigned open_flags,
		    umode_t mode)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct nfs_open_context *ctx;
	struct dentry *res;
	struct iattr attr = { .ia_valid = ATTR_OPEN };
	struct inode *inode;
	unsigned int lookup_flags = 0;
	unsigned long dir_verifier;
	bool switched = false;	/* true once we swapped in a parallel-lookup dentry */
	int created = 0;
	int err;

	/* Expect a negative dentry */
	BUG_ON(d_inode(dentry));

	dfprintk(VFS, "NFS: atomic_open(%s/%lu), %pd\n",
			dir->i_sb->s_id, dir->i_ino, dentry);

	err = nfs_check_flags(open_flags);
	if (err)
		return err;

	/* NFS only supports OPEN on regular files */
	if ((open_flags & O_DIRECTORY)) {
		if (!d_in_lookup(dentry)) {
			/*
			 * Hashed negative dentry with O_DIRECTORY: dentry was
			 * revalidated and is fine, no need to perform lookup
			 * again
			 */
			return -ENOENT;
		}
		lookup_flags = LOOKUP_OPEN|LOOKUP_DIRECTORY;
		goto no_open;
	}

	if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
		return -ENAMETOOLONG;

	if (open_flags & O_CREAT) {
		struct nfs_server *server = NFS_SERVER(dir);

		/* Apply the umask locally unless the server does it for us. */
		if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
			mode &= ~current_umask();

		attr.ia_valid |= ATTR_MODE;
		attr.ia_mode = mode;
	}
	if (open_flags & O_TRUNC) {
		attr.ia_valid |= ATTR_SIZE;
		attr.ia_size = 0;
	}

	/*
	 * A hashed dentry that is not being created must be replaced by an
	 * in-lookup one so concurrent opens of the same name serialize.
	 */
	if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
		d_drop(dentry);
		switched = true;
		dentry = d_alloc_parallel(dentry->d_parent,
					  &dentry->d_name, &wq);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		if (unlikely(!d_in_lookup(dentry)))
			return finish_no_open(file, dentry);
	}

	ctx = create_nfs_open_context(dentry, open_flags, file);
	err = PTR_ERR(ctx);
	if (IS_ERR(ctx))
		goto out;

	trace_nfs_atomic_open_enter(dir, ctx, open_flags);
	inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created);
	if (created)
		file->f_mode |= FMODE_CREATED;
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
		put_nfs_open_context(ctx);
		d_drop(dentry);
		switch (err) {
		case -ENOENT:
			/* Cache the negative result with the right verifier. */
			d_splice_alias(NULL, dentry);
			if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
				dir_verifier = inode_peek_iversion_raw(dir);
			else
				dir_verifier = nfs_save_change_attribute(dir);
			nfs_set_verifier(dentry, dir_verifier);
			break;
		case -EISDIR:
		case -ENOTDIR:
			goto no_open;
		case -ELOOP:
			if (!(open_flags & O_NOFOLLOW))
				goto no_open;
			break;
			/* case -EINVAL: */
		default:
			break;
		}
		goto out;
	}
	file->f_mode |= FMODE_CAN_ODIRECT;

	err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
	trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
	put_nfs_open_context(ctx);
out:
	if (unlikely(switched)) {
		d_lookup_done(dentry);
		dput(dentry);
	}
	return err;

no_open:
	/* Fallback: ordinary lookup, then hand the result to the VFS. */
	res = nfs_lookup(dir, dentry, lookup_flags);
	if (!res) {
		inode = d_inode(dentry);
		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
		    !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
			res = ERR_PTR(-ENOTDIR);
		else if (inode && S_ISREG(inode->i_mode))
			res = ERR_PTR(-EOPENSTALE);
	} else if (!IS_ERR(res)) {
		inode = d_inode(res);
		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
		    !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
			dput(res);
			res = ERR_PTR(-ENOTDIR);
		} else if (inode && S_ISREG(inode->i_mode)) {
			dput(res);
			res = ERR_PTR(-EOPENSTALE);
		}
	}
	if (switched) {
		d_lookup_done(dentry);
		if (!res)
			res = dentry;
		else
			dput(dentry);
	}
	if (IS_ERR(res))
		return PTR_ERR(res);
	return finish_no_open(file, res);
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-09-29 00:42:51 +08:00
|
|
|
/*
 * NFSv4 ->d_revalidate worker: for plain opens of regular files we can
 * skip revalidation and let the OPEN call do it; everything else falls
 * through to the full nfs_do_lookup_revalidate().
 */
static int
nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
			  unsigned int flags)
{
	struct inode *inode;

	if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
		goto full_reval;
	if (d_mountpoint(dentry))
		goto full_reval;

	inode = d_inode(dentry);

	/* We can't create new files in nfs_open_revalidate(), so we
	 * optimize away revalidation of negative dentries.
	 */
	if (inode == NULL)
		goto full_reval;

	if (nfs_verifier_is_delegated(dentry))
		return nfs_lookup_revalidate_delegated(dir, dentry, inode);

	/* NFS only supports OPEN on regular files */
	if (!S_ISREG(inode->i_mode))
		goto full_reval;

	/* We cannot do exclusive creation on a positive dentry */
	if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
		goto reval_dentry;

	/* Check if the directory changed */
	if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
		goto reval_dentry;

	/* Let f_op->open() actually open (and revalidate) the file */
	return 1;
reval_dentry:
	if (flags & LOOKUP_RCU)
		return -ECHILD;
	return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags);

full_reval:
	return nfs_do_lookup_revalidate(dir, dentry, flags);
}
|
2010-09-17 22:56:51 +08:00
|
|
|
|
2018-09-29 00:42:51 +08:00
|
|
|
/* ->d_revalidate for NFSv4: delegate to the v4-aware revalidation logic. */
static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
	return __nfs_lookup_revalidate(dentry, flags,
			nfs4_do_lookup_revalidate);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* CONFIG_NFS_V4 */
|
|
|
|
|
2019-09-13 20:29:02 +08:00
|
|
|
/*
 * Instantiate @dentry from @fhandle/@fattr, fetching the file handle
 * and/or attributes from the server when the caller did not supply them.
 * Returns the dentry to use (possibly an alias from d_splice_alias(),
 * possibly NULL) or an ERR_PTR on failure.
 */
struct dentry *
nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle,
		  struct nfs_fattr *fattr)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct inode *inode;
	struct dentry *d;
	int error;

	d_drop(dentry);

	/* No file handle supplied: ask the server for one. */
	if (fhandle->size == 0) {
		error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
		if (error)
			goto out_error;
	}
	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	/* No usable attributes yet: fetch them with GETATTR. */
	if (!(fattr->valid & NFS_ATTR_FATTR)) {
		struct nfs_server *server = NFS_SB(dentry->d_sb);
		error = server->nfs_client->rpc_ops->getattr(server, fhandle,
				fattr, NULL);
		if (error < 0)
			goto out_error;
	}
	inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
	d = d_splice_alias(inode, dentry);
out:
	dput(parent);
	return d;
out_error:
	d = ERR_PTR(error);
	goto out;
}
EXPORT_SYMBOL_GPL(nfs_add_or_obtain);
|
|
|
|
|
|
|
|
/*
 * Code common to create, mkdir, and mknod: instantiate the dentry from
 * the server-supplied file handle/attributes and report success/failure.
 */
int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
				struct nfs_fattr *fattr)
{
	struct dentry *d = nfs_add_or_obtain(dentry, fhandle, fattr);

	if (IS_ERR(d))
		return PTR_ERR(d);

	/* Callers don't care */
	dput(d);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_instantiate);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Following a failed create operation, we drop the dentry rather
|
|
|
|
* than retain a negative dentry. This avoids a problem in the event
|
|
|
|
* that the operation succeeded on the server, but an error in the
|
|
|
|
* reply path made it appear to have failed.
|
|
|
|
*/
|
2023-01-13 19:49:13 +08:00
|
|
|
int nfs_create(struct mnt_idmap *idmap, struct inode *dir,
|
2021-01-21 21:19:43 +08:00
|
|
|
struct dentry *dentry, umode_t mode, bool excl)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct iattr attr;
|
2012-06-11 06:05:36 +08:00
|
|
|
int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
|
2005-04-17 06:20:36 +08:00
|
|
|
int error;
|
|
|
|
|
2013-12-18 01:20:16 +08:00
|
|
|
dfprintk(VFS, "NFS: create(%s/%lu), %pd\n",
|
2013-09-16 22:53:17 +08:00
|
|
|
dir->i_sb->s_id, dir->i_ino, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
attr.ia_mode = mode;
|
|
|
|
attr.ia_valid = ATTR_MODE;
|
|
|
|
|
2013-08-21 22:53:09 +08:00
|
|
|
trace_nfs_create_enter(dir, dentry, open_flags);
|
2012-06-05 21:10:19 +08:00
|
|
|
error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
|
2013-08-21 22:53:09 +08:00
|
|
|
trace_nfs_create_exit(dir, dentry, open_flags, error);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (error != 0)
|
|
|
|
goto out_err;
|
|
|
|
return 0;
|
|
|
|
out_err:
|
|
|
|
d_drop(dentry);
|
|
|
|
return error;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_create);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* See comments for nfs_proc_create regarding failed operations.
|
|
|
|
*/
|
2012-07-17 04:39:10 +08:00
|
|
|
int
|
2023-01-13 19:49:16 +08:00
|
|
|
nfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
|
2021-01-21 21:19:43 +08:00
|
|
|
struct dentry *dentry, umode_t mode, dev_t rdev)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct iattr attr;
|
|
|
|
int status;
|
|
|
|
|
2013-12-18 01:20:16 +08:00
|
|
|
dfprintk(VFS, "NFS: mknod(%s/%lu), %pd\n",
|
2013-09-16 22:53:17 +08:00
|
|
|
dir->i_sb->s_id, dir->i_ino, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
attr.ia_mode = mode;
|
|
|
|
attr.ia_valid = ATTR_MODE;
|
|
|
|
|
2013-08-22 00:36:04 +08:00
|
|
|
trace_nfs_mknod_enter(dir, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
status = NFS_PROTO(dir)->mknod(dir, dentry, &attr, rdev);
|
2013-08-22 00:36:04 +08:00
|
|
|
trace_nfs_mknod_exit(dir, dentry, status);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (status != 0)
|
|
|
|
goto out_err;
|
|
|
|
return 0;
|
|
|
|
out_err:
|
|
|
|
d_drop(dentry);
|
|
|
|
return status;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_mknod);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* See comments for nfs_proc_create regarding failed operations.
|
|
|
|
*/
|
2023-01-13 19:49:15 +08:00
|
|
|
int nfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
|
2021-01-21 21:19:43 +08:00
|
|
|
struct dentry *dentry, umode_t mode)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct iattr attr;
|
|
|
|
int error;
|
|
|
|
|
2013-12-18 01:20:16 +08:00
|
|
|
dfprintk(VFS, "NFS: mkdir(%s/%lu), %pd\n",
|
2013-09-16 22:53:17 +08:00
|
|
|
dir->i_sb->s_id, dir->i_ino, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
attr.ia_valid = ATTR_MODE;
|
|
|
|
attr.ia_mode = mode | S_IFDIR;
|
|
|
|
|
2013-08-22 00:36:04 +08:00
|
|
|
trace_nfs_mkdir_enter(dir, dentry);
|
2005-04-17 06:20:36 +08:00
|
|
|
error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
|
2013-08-22 00:36:04 +08:00
|
|
|
trace_nfs_mkdir_exit(dir, dentry, error);
|
2005-04-17 06:20:36 +08:00
|
|
|
if (error != 0)
|
|
|
|
goto out_err;
|
|
|
|
return 0;
|
|
|
|
out_err:
|
|
|
|
d_drop(dentry);
|
|
|
|
return error;
|
|
|
|
}
|
2012-07-31 04:05:23 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_mkdir);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-29 08:43:18 +08:00
|
|
|
/*
 * The server reported ENOENT for this name: if the dentry is still
 * hashed and positive, delete it so the stale name disappears from
 * the dcache.
 */
static void nfs_dentry_handle_enoent(struct dentry *dentry)
{
	if (simple_positive(dentry))
		d_delete(dentry);
}
|
|
|
|
|
2021-07-08 09:43:09 +08:00
|
|
|
static void nfs_dentry_remove_handle_error(struct inode *dir,
|
|
|
|
struct dentry *dentry, int error)
|
|
|
|
{
|
|
|
|
switch (error) {
|
|
|
|
case -ENOENT:
|
2022-08-19 07:55:59 +08:00
|
|
|
if (d_really_is_positive(dentry))
|
|
|
|
d_delete(dentry);
|
2021-12-18 04:36:57 +08:00
|
|
|
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
|
|
|
break;
|
2021-07-08 09:43:09 +08:00
|
|
|
case 0:
|
2021-12-18 04:36:57 +08:00
|
|
|
nfs_d_prune_case_insensitive_aliases(d_inode(dentry));
|
2021-07-08 09:43:09 +08:00
|
|
|
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-07-17 04:39:10 +08:00
|
|
|
/*
 * Remove a directory.  If the dentry is positive, the RMDIR is issued
 * under the inode's rmdir_sem (held for write) so that it serializes
 * against concurrent sillyrename completions on children.
 */
int nfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	dfprintk(VFS, "NFS: rmdir(%s/%lu), %pd\n",
			dir->i_sb->s_id, dir->i_ino, dentry);

	trace_nfs_rmdir_enter(dir, dentry);
	if (d_really_is_positive(dentry)) {
		down_write(&NFS_I(d_inode(dentry))->rmdir_sem);
		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
		/* Ensure the VFS deletes this inode */
		switch (error) {
		case 0:
			clear_nlink(d_inode(dentry));
			break;
		case -ENOENT:
			nfs_dentry_handle_enoent(dentry);
		}
		up_write(&NFS_I(d_inode(dentry))->rmdir_sem);
	} else
		/* Negative dentry: no inode, so no rmdir_sem to take */
		error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
	nfs_dentry_remove_handle_error(dir, dentry, error);
	trace_nfs_rmdir_exit(dir, dentry, error);

	return error;
}
EXPORT_SYMBOL_GPL(nfs_rmdir);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
 * Remove a file after making sure there are no pending writes,
 * and after checking that the file has only one user.
 *
 * We invalidate the attribute cache and free the inode prior to the operation
 * to avoid possible races if the server reuses the inode.
 */
static int nfs_safe_remove(struct dentry *dentry)
{
	struct inode *dir = d_inode(dentry->d_parent);
	struct inode *inode = d_inode(dentry);
	int error = -EBUSY;

	dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry);

	/* If the dentry was sillyrenamed, we simply call d_delete() */
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		error = 0;
		goto out;
	}

	trace_nfs_remove_enter(dir, dentry);
	if (inode != NULL) {
		error = NFS_PROTO(dir)->remove(dir, dentry);
		/* Positive dentry: reflect the unlink in the cached nlink */
		if (error == 0)
			nfs_drop_nlink(inode);
	} else
		error = NFS_PROTO(dir)->remove(dir, dentry);
	if (error == -ENOENT)
		nfs_dentry_handle_enoent(dentry);
	trace_nfs_remove_exit(dir, dentry, error);
out:
	return error;
}
|
|
|
|
|
|
|
|
/* We do silly rename. In case sillyrename() returns -EBUSY, the inode
 * belongs to an active ".nfs..." file and we return -EBUSY.
 *
 * If sillyrename() returns 0, we do nothing, otherwise we unlink.
 */
int nfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int error;

	dfprintk(VFS, "NFS: unlink(%s/%lu, %pd)\n", dir->i_sb->s_id,
		dir->i_ino, dentry);

	trace_nfs_unlink_enter(dir, dentry);
	spin_lock(&dentry->d_lock);
	/* Busy dentry (extra refs) and not flagged to preserve unlinked
	 * state: fall back to sillyrename instead of a real REMOVE.
	 */
	if (d_count(dentry) > 1 && !test_bit(NFS_INO_PRESERVE_UNLINKED,
					     &NFS_I(d_inode(dentry))->flags)) {
		spin_unlock(&dentry->d_lock);
		/* Start asynchronous writeout of the inode */
		write_inode_now(d_inode(dentry), 0);
		error = nfs_sillyrename(dir, dentry);
		goto out;
	}
	/* We must prevent any concurrent open until the unlink
	 * completes.  ->d_revalidate will wait for ->d_fsdata
	 * to clear.  We set it here to ensure no lookup succeeds until
	 * the unlink is complete on the server.
	 */
	error = -ETXTBSY;
	if (WARN_ON(dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
	    WARN_ON(dentry->d_fsdata == NFS_FSDATA_BLOCKED)) {
		spin_unlock(&dentry->d_lock);
		goto out;
	}
	/* old devname */
	kfree(dentry->d_fsdata);
	dentry->d_fsdata = NFS_FSDATA_BLOCKED;

	spin_unlock(&dentry->d_lock);
	error = nfs_safe_remove(dentry);
	nfs_dentry_remove_handle_error(dir, dentry, error);
	/* Unblock lookups: waiters in ->d_revalidate sleep on d_fsdata */
	dentry->d_fsdata = NULL;
	wake_up_var(&dentry->d_fsdata);
out:
	trace_nfs_unlink_exit(dir, dentry, error);
	return error;
}
EXPORT_SYMBOL_GPL(nfs_unlink);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-08-23 08:06:23 +08:00
|
|
|
/*
 * To create a symbolic link, most file systems instantiate a new inode,
 * add a page to it containing the path, then write it out to the disk
 * using prepare_write/commit_write.
 *
 * Unfortunately the NFS client can't create the in-core inode first
 * because it needs a file handle to create an in-core inode (see
 * fs/nfs/inode.c:nfs_fhget).  We only have a file handle *after* the
 * symlink request has completed on the server.
 *
 * So instead we allocate a raw page, copy the symname into it, then do
 * the SYMLINK request with the page as the buffer.  If it succeeds, we
 * now have a new file handle and can instantiate an in-core NFS inode
 * and move the raw page into its mapping.
 */
int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
		struct dentry *dentry, const char *symname)
{
	struct page *page;
	char *kaddr;
	struct iattr attr;
	unsigned int pathlen = strlen(symname);
	int error;

	dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s)\n", dir->i_sb->s_id,
		dir->i_ino, dentry, symname);

	/* The symlink target must fit in the single page we send */
	if (pathlen > PAGE_SIZE)
		return -ENAMETOOLONG;

	attr.ia_mode = S_IFLNK | S_IRWXUGO;
	attr.ia_valid = ATTR_MODE;

	page = alloc_page(GFP_USER);
	if (!page)
		return -ENOMEM;

	kaddr = page_address(page);
	memcpy(kaddr, symname, pathlen);
	/* Zero the tail so no uninitialized data goes over the wire */
	if (pathlen < PAGE_SIZE)
		memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);

	trace_nfs_symlink_enter(dir, dentry);
	error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
	trace_nfs_symlink_exit(dir, dentry, error);
	if (error != 0) {
		dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s) error %d\n",
			dir->i_sb->s_id, dir->i_ino,
			dentry, symname, error);
		d_drop(dentry);
		__free_page(page);
		return error;
	}

	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));

	/*
	 * No big deal if we can't add this page to the page cache here.
	 * READLINK will get the missing page from the server if needed.
	 */
	if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0,
							GFP_KERNEL)) {
		SetPageUptodate(page);
		unlock_page(page);
		/*
		 * add_to_page_cache_lru() grabs an extra page refcount.
		 * Drop it here to avoid leaking this page later.
		 */
		put_page(page);
	} else
		__free_page(page);

	return 0;
}
EXPORT_SYMBOL_GPL(nfs_symlink);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-07-17 04:39:10 +08:00
|
|
|
/*
 * Create a hard link.  Regular files are flushed first so the server
 * sees up-to-date data before the link; on success the dentry is
 * instantiated locally with an extra inode reference.
 */
int
nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);
	int error;

	dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n",
		old_dentry, dentry);

	trace_nfs_link_enter(inode, dir, dentry);
	/* Unhash; re-added by d_add() below on success */
	d_drop(dentry);
	if (S_ISREG(inode->i_mode))
		nfs_sync_inode(inode);
	error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
	if (error == 0) {
		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
		ihold(inode);
		d_add(dentry, inode);
	}
	trace_nfs_link_exit(inode, dir, dentry, error);
	return error;
}
EXPORT_SYMBOL_GPL(nfs_link);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2022-08-01 08:33:34 +08:00
|
|
|
/*
 * Rename completion callback: clear the NFS_FSDATA_BLOCKED marker on the
 * target dentry and wake anyone in ->d_revalidate waiting on d_fsdata.
 */
static void
nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
{
	struct dentry *new_dentry = data->new_dentry;

	new_dentry->d_fsdata = NULL;
	wake_up_var(&new_dentry->d_fsdata);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * RENAME
 * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
 * different file handle for the same inode after a rename (e.g. when
 * moving to a different directory). A fail-safe method to do so would
 * be to look up old_dir/old_name, create a link to new_dir/new_name and
 * rename the old file using the sillyrename stuff. This way, the original
 * file in old_dir will go away when the last process iput()s the inode.
 *
 * FIXED.
 *
 * It actually works quite well. One needs to have the possibility for
 * at least one ".nfs..." file in each directory the file ever gets
 * moved or linked to which happens automagically with the new
 * implementation that only depends on the dcache stuff instead of
 * using the inode layer
 *
 * Unfortunately, things are a little more complicated than indicated
 * above. For a cross-directory move, we want to make sure we can get
 * rid of the old inode after the operation.  This means there must be
 * no pending writes (if it's a file), and the use count must be 1.
 * If these conditions are met, we can drop the dentries before doing
 * the rename.
 */
int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
	       struct dentry *old_dentry, struct inode *new_dir,
	       struct dentry *new_dentry, unsigned int flags)
{
	struct inode *old_inode = d_inode(old_dentry);
	struct inode *new_inode = d_inode(new_dentry);
	struct dentry *dentry = NULL;
	struct rpc_task *task;
	bool must_unblock = false;
	int error = -EBUSY;

	/* No RENAME_NOREPLACE/EXCHANGE etc. support */
	if (flags)
		return -EINVAL;

	dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
		 old_dentry, new_dentry,
		 d_count(new_dentry));

	trace_nfs_rename_enter(old_dir, old_dentry, new_dir, new_dentry);
	/*
	 * For non-directories, check whether the target is busy and if so,
	 * make a copy of the dentry and then do a silly-rename. If the
	 * silly-rename succeeds, the copied dentry is hashed and becomes
	 * the new target.
	 */
	if (new_inode && !S_ISDIR(new_inode->i_mode)) {
		/* We must prevent any concurrent open until the unlink
		 * completes.  ->d_revalidate will wait for ->d_fsdata
		 * to clear.  We set it here to ensure no lookup succeeds until
		 * the unlink is complete on the server.
		 */
		error = -ETXTBSY;
		if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
		    WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
			goto out;
		if (new_dentry->d_fsdata) {
			/* old devname */
			kfree(new_dentry->d_fsdata);
			new_dentry->d_fsdata = NULL;
		}

		spin_lock(&new_dentry->d_lock);
		if (d_count(new_dentry) > 2) {
			/* Target is busy: sillyrename it out of the way */
			int err;

			spin_unlock(&new_dentry->d_lock);

			/* copy the target dentry's name */
			dentry = d_alloc(new_dentry->d_parent,
					 &new_dentry->d_name);
			if (!dentry)
				goto out;

			/* silly-rename the existing target ... */
			err = nfs_sillyrename(new_dir, new_dentry);
			if (err)
				goto out;

			new_dentry = dentry;
			new_inode = NULL;
		} else {
			/* Block lookups of the target until rename completes;
			 * nfs_unblock_rename() clears this again.
			 */
			new_dentry->d_fsdata = NFS_FSDATA_BLOCKED;
			must_unblock = true;
			spin_unlock(&new_dentry->d_lock);
		}

	}

	if (S_ISREG(old_inode->i_mode))
		nfs_sync_inode(old_inode);
	task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
				must_unblock ? nfs_unblock_rename : NULL);
	if (IS_ERR(task)) {
		error = PTR_ERR(task);
		goto out;
	}

	error = rpc_wait_for_completion_task(task);
	if (error != 0) {
		/* Interrupted: let the RPC finish asynchronously */
		((struct nfs_renamedata *)task->tk_calldata)->cancelled = 1;
		/* Paired with the atomic_dec_and_test() barrier in rpc_do_put_task() */
		smp_wmb();
	} else
		error = task->tk_status;
	rpc_put_task(task);
	/* Ensure the inode attributes are revalidated */
	if (error == 0) {
		spin_lock(&old_inode->i_lock);
		NFS_I(old_inode)->attr_gencount = nfs_inc_attr_generation_counter();
		nfs_set_cache_invalid(old_inode, NFS_INO_INVALID_CHANGE |
						 NFS_INO_INVALID_CTIME |
						 NFS_INO_REVAL_FORCED);
		spin_unlock(&old_inode->i_lock);
	}
out:
	trace_nfs_rename_exit(old_dir, old_dentry,
			new_dir, new_dentry, error);
	if (!error) {
		if (new_inode != NULL)
			nfs_drop_nlink(new_inode);
		/*
		 * The d_move() should be here instead of in an async RPC completion
		 * handler because we need the proper locks to move the dentry.  If
		 * we're interrupted by a signal, the async RPC completion handler
		 * should mark the directories for revalidation.
		 */
		d_move(old_dentry, new_dentry);
		nfs_set_verifier(old_dentry,
					nfs_save_change_attribute(new_dir));
	} else if (error == -ENOENT)
		nfs_dentry_handle_enoent(old_dentry);

	/* new dentry created? */
	if (dentry)
		dput(dentry);
	return error;
}
EXPORT_SYMBOL_GPL(nfs_rename);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
/* Global LRU of inodes with ACCESS cache entries, and its lock */
static DEFINE_SPINLOCK(nfs_access_lru_lock);
static LIST_HEAD(nfs_access_lru_list);
/* Total number of cached ACCESS entries across all inodes */
static atomic_long_t nfs_access_nr_entries;

/* Upper bound on the total ACCESS cache size; tunable at runtime */
static unsigned long nfs_access_max_cachesize = 4*1024*1024;
module_param(nfs_access_max_cachesize, ulong, 0644);
MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache length");
|
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
/*
 * Release one ACCESS cache entry: drop its group_info reference, free it
 * after an RCU grace period, and decrement the global entry counter.
 * The barriers around the atomic decrement order it against readers of
 * nfs_access_nr_entries — NOTE(review): pairing site not visible here.
 */
static void nfs_access_free_entry(struct nfs_access_entry *entry)
{
	put_group_info(entry->group_info);
	kfree_rcu(entry, rcu_head);
	smp_mb__before_atomic();
	atomic_long_dec(&nfs_access_nr_entries);
	smp_mb__after_atomic();
}
|
|
|
|
|
2010-05-14 00:51:06 +08:00
|
|
|
static void nfs_access_free_list(struct list_head *head)
|
|
|
|
{
|
|
|
|
struct nfs_access_entry *cache;
|
|
|
|
|
|
|
|
while (!list_empty(head)) {
|
|
|
|
cache = list_entry(head->next, struct nfs_access_entry, lru);
|
|
|
|
list_del(&cache->lru);
|
|
|
|
nfs_access_free_entry(cache);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-22 01:53:48 +08:00
|
|
|
/*
 * Reclaim up to @nr_to_scan ACCESS cache entries, taking at most one
 * entry from each inode on the global LRU per pass.  Entries are moved
 * to a private list under the locks and freed afterwards.  Returns the
 * number of entries freed.
 */
static unsigned long
nfs_do_access_cache_scan(unsigned int nr_to_scan)
{
	LIST_HEAD(head);
	struct nfs_inode *nfsi, *next;
	struct nfs_access_entry *cache;
	long freed = 0;

	spin_lock(&nfs_access_lru_lock);
	list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
		struct inode *inode;

		if (nr_to_scan-- == 0)
			break;
		inode = &nfsi->vfs_inode;
		spin_lock(&inode->i_lock);
		if (list_empty(&nfsi->access_cache_entry_lru))
			goto remove_lru_entry;
		/* Evict the oldest entry for this inode */
		cache = list_entry(nfsi->access_cache_entry_lru.next,
				struct nfs_access_entry, lru);
		list_move(&cache->lru, &head);
		rb_erase(&cache->rb_node, &nfsi->access_cache);
		freed++;
		if (!list_empty(&nfsi->access_cache_entry_lru))
			/* More entries remain: rotate inode to LRU tail */
			list_move_tail(&nfsi->access_cache_inode_lru,
					&nfs_access_lru_list);
		else {
remove_lru_entry:
			/* Inode's cache is empty: take it off the global LRU */
			list_del_init(&nfsi->access_cache_inode_lru);
			smp_mb__before_atomic();
			clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
			smp_mb__after_atomic();
		}
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&nfs_access_lru_lock);
	/* Free outside the locks */
	nfs_access_free_list(&head);
	return freed;
}
|
|
|
|
|
2014-07-22 01:53:48 +08:00
|
|
|
unsigned long
|
|
|
|
nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|
|
|
{
|
|
|
|
int nr_to_scan = sc->nr_to_scan;
|
|
|
|
gfp_t gfp_mask = sc->gfp_mask;
|
|
|
|
|
|
|
|
if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
|
|
|
|
return SHRINK_STOP;
|
|
|
|
return nfs_do_access_cache_scan(nr_to_scan);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-08-28 08:18:09 +08:00
|
|
|
/*
 * Shrinker ->count_objects callback: report the reclaimable ACCESS
 * cache population, scaled by vfs_pressure_ratio().
 */
unsigned long
nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
}
|
|
|
|
|
2014-07-22 01:53:48 +08:00
|
|
|
/*
 * If the global ACCESS cache has grown past nfs_access_max_cachesize,
 * scan back down to the limit, but never more than 100 entries at a
 * time.  The explicit nr_entries < 0 check matters: the comparison
 * against the unsigned long limit would otherwise treat a negative
 * (transiently skewed) counter as a huge value.
 */
static void
nfs_access_cache_enforce_limit(void)
{
	long nr_entries = atomic_long_read(&nfs_access_nr_entries);
	unsigned long diff;
	unsigned int nr_to_scan;

	if (nr_entries < 0 || nr_entries <= nfs_access_max_cachesize)
		return;
	/* Scan min(excess, 100) entries */
	nr_to_scan = 100;
	diff = nr_entries - nfs_access_max_cachesize;
	if (diff < nr_to_scan)
		nr_to_scan = diff;
	nfs_do_access_cache_scan(nr_to_scan);
}
|
|
|
|
|
2010-05-14 00:51:06 +08:00
|
|
|
static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-07-25 23:28:18 +08:00
|
|
|
struct rb_root *root_node = &nfsi->access_cache;
|
2010-05-14 00:51:06 +08:00
|
|
|
struct rb_node *n;
|
2006-07-25 23:28:18 +08:00
|
|
|
struct nfs_access_entry *entry;
|
|
|
|
|
|
|
|
/* Unhook entries from the cache */
|
|
|
|
while ((n = rb_first(root_node)) != NULL) {
|
|
|
|
entry = rb_entry(n, struct nfs_access_entry, rb_node);
|
|
|
|
rb_erase(n, root_node);
|
2010-05-14 00:51:06 +08:00
|
|
|
list_move(&entry->lru, head);
|
2006-07-25 23:28:18 +08:00
|
|
|
}
|
|
|
|
nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
void nfs_access_zap_cache(struct inode *inode)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-05-14 00:51:06 +08:00
|
|
|
LIST_HEAD(head);
|
|
|
|
|
|
|
|
if (test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags) == 0)
|
|
|
|
return;
|
2006-07-25 23:28:18 +08:00
|
|
|
/* Remove from global LRU init */
|
2010-05-14 00:51:06 +08:00
|
|
|
spin_lock(&nfs_access_lru_lock);
|
|
|
|
if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
|
2006-07-25 23:28:18 +08:00
|
|
|
list_del_init(&NFS_I(inode)->access_cache_inode_lru);
|
|
|
|
|
2006-07-25 23:28:18 +08:00
|
|
|
spin_lock(&inode->i_lock);
|
2010-05-14 00:51:06 +08:00
|
|
|
__nfs_access_zap_cache(NFS_I(inode), &head);
|
|
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
spin_unlock(&nfs_access_lru_lock);
|
|
|
|
nfs_access_free_list(&head);
|
2006-07-25 23:28:18 +08:00
|
|
|
}
|
2012-07-31 04:05:24 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nfs_access_zap_cache);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2021-09-28 07:47:57 +08:00
|
|
|
static int access_cmp(const struct cred *a, const struct nfs_access_entry *b)
|
|
|
|
{
|
|
|
|
struct group_info *ga, *gb;
|
|
|
|
int g;
|
|
|
|
|
|
|
|
if (uid_lt(a->fsuid, b->fsuid))
|
|
|
|
return -1;
|
|
|
|
if (uid_gt(a->fsuid, b->fsuid))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (gid_lt(a->fsgid, b->fsgid))
|
|
|
|
return -1;
|
|
|
|
if (gid_gt(a->fsgid, b->fsgid))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
ga = a->group_info;
|
|
|
|
gb = b->group_info;
|
|
|
|
if (ga == gb)
|
|
|
|
return 0;
|
|
|
|
if (ga == NULL)
|
|
|
|
return -1;
|
|
|
|
if (gb == NULL)
|
|
|
|
return 1;
|
|
|
|
if (ga->ngroups < gb->ngroups)
|
|
|
|
return -1;
|
|
|
|
if (ga->ngroups > gb->ngroups)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
for (g = 0; g < ga->ngroups; g++) {
|
|
|
|
if (gid_lt(ga->gid[g], gb->gid[g]))
|
|
|
|
return -1;
|
|
|
|
if (gid_gt(ga->gid[g], gb->gid[g]))
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, const struct cred *cred)
|
2006-07-25 23:28:18 +08:00
|
|
|
{
|
|
|
|
struct rb_node *n = NFS_I(inode)->access_cache.rb_node;
|
|
|
|
|
|
|
|
while (n != NULL) {
|
2018-12-03 08:30:30 +08:00
|
|
|
struct nfs_access_entry *entry =
|
|
|
|
rb_entry(n, struct nfs_access_entry, rb_node);
|
2021-09-28 07:47:57 +08:00
|
|
|
int cmp = access_cmp(cred, entry);
|
2006-07-25 23:28:18 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
if (cmp < 0)
|
2006-07-25 23:28:18 +08:00
|
|
|
n = n->rb_left;
|
2018-12-03 08:30:30 +08:00
|
|
|
else if (cmp > 0)
|
2006-07-25 23:28:18 +08:00
|
|
|
n = n->rb_right;
|
|
|
|
else
|
|
|
|
return entry;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2006-07-25 23:28:18 +08:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2022-08-27 07:44:44 +08:00
|
|
|
static u64 nfs_access_login_time(const struct task_struct *task,
|
|
|
|
const struct cred *cred)
|
|
|
|
{
|
|
|
|
const struct task_struct *parent;
|
2023-01-02 09:17:23 +08:00
|
|
|
const struct cred *pcred;
|
2022-08-27 07:44:44 +08:00
|
|
|
u64 ret;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
for (;;) {
|
|
|
|
parent = rcu_dereference(task->real_parent);
|
2023-01-02 09:17:23 +08:00
|
|
|
pcred = rcu_dereference(parent->cred);
|
|
|
|
if (parent == task || cred_fscmp(pcred, cred) != 0)
|
2022-08-27 07:44:44 +08:00
|
|
|
break;
|
|
|
|
task = parent;
|
|
|
|
}
|
|
|
|
ret = task->start_time;
|
|
|
|
rcu_read_unlock();
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-09-28 07:47:57 +08:00
|
|
|
/*
 * Slow-path access-cache lookup under inode->i_lock.
 *
 * Searches the per-inode rbtree for an entry matching @cred. If the
 * attribute cache is stale and @may_block allows it, drops the lock,
 * revalidates the inode once, and retries the lookup a single time.
 *
 * Returns 0 and fills *@mask on a hit; -ENOENT when no usable entry
 * exists (including entries older than the caller's login time);
 * -ECHILD when revalidation is needed but blocking is not permitted.
 */
static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 login_time = nfs_access_login_time(current, cred);
	struct nfs_access_entry *cache;
	bool retry = true;	/* allow exactly one revalidate-and-retry */
	int err;

	spin_lock(&inode->i_lock);
	for(;;) {
		if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
			goto out_zap;
		cache = nfs_access_search_rbtree(inode, cred);
		err = -ENOENT;
		if (cache == NULL)
			goto out;
		/* Found an entry, is our attribute cache valid? */
		if (!nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
			break;
		if (!retry)
			break;
		err = -ECHILD;
		if (!may_block)
			goto out;
		/* Drop the lock to revalidate, then retry once. */
		spin_unlock(&inode->i_lock);
		err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
		if (err)
			return err;
		spin_lock(&inode->i_lock);
		retry = false;
	}
	/* Reject entries recorded before the caller's login time. */
	err = -ENOENT;
	if ((s64)(login_time - cache->timestamp) > 0)
		goto out;
	*mask = cache->mask;
	/* Keep the hit hot: move it to the tail of the entry LRU. */
	list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
	err = 0;
out:
	spin_unlock(&inode->i_lock);
	return err;
out_zap:
	spin_unlock(&inode->i_lock);
	nfs_access_zap_cache(inode);
	return -ENOENT;
}
|
|
|
|
|
2021-09-28 07:47:57 +08:00
|
|
|
/*
 * Lockless fast-path access-cache lookup.
 *
 * Checks only the most recently used entry (the tail of the per-inode
 * entry LRU) under rcu_read_lock(), pairing with the smp_wmb() in
 * nfs_access_add_cache(). Returns 0 and fills *@mask on a hit;
 * -ECHILD on any miss so the caller falls back to the locked path.
 */
static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, u32 *mask)
{
	/* Only check the most recently returned cache entry,
	 * but do it without locking.
	 */
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 login_time = nfs_access_login_time(current, cred);
	struct nfs_access_entry *cache;
	int err = -ECHILD;
	struct list_head *lh;

	rcu_read_lock();
	if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
		goto out;
	lh = rcu_dereference(list_tail_rcu(&nfsi->access_cache_entry_lru));
	cache = list_entry(lh, struct nfs_access_entry, lru);
	/* Empty LRU or a credential mismatch: treat as a miss. */
	if (lh == &nfsi->access_cache_entry_lru ||
	    access_cmp(cred, cache) != 0)
		cache = NULL;
	if (cache == NULL)
		goto out;
	/* Entries recorded before the caller's login time are stale. */
	if ((s64)(login_time - cache->timestamp) > 0)
		goto out;
	if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
		goto out;
	*mask = cache->mask;
	err = 0;
out:
	rcu_read_unlock();
	return err;
}
|
|
|
|
|
2021-09-28 07:47:57 +08:00
|
|
|
int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
|
|
|
|
u32 *mask, bool may_block)
|
2020-06-24 06:38:57 +08:00
|
|
|
{
|
|
|
|
int status;
|
|
|
|
|
2021-09-28 07:47:57 +08:00
|
|
|
status = nfs_access_get_cached_rcu(inode, cred, mask);
|
2020-06-24 06:38:57 +08:00
|
|
|
if (status != 0)
|
2021-09-28 07:47:57 +08:00
|
|
|
status = nfs_access_get_cached_locked(inode, cred, mask,
|
2020-06-24 06:38:57 +08:00
|
|
|
may_block);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nfs_access_get_cached);
|
|
|
|
|
2021-09-28 07:47:57 +08:00
|
|
|
static void nfs_access_add_rbtree(struct inode *inode,
|
|
|
|
struct nfs_access_entry *set,
|
|
|
|
const struct cred *cred)
|
2006-07-25 23:28:18 +08:00
|
|
|
{
|
2006-07-25 23:28:18 +08:00
|
|
|
struct nfs_inode *nfsi = NFS_I(inode);
|
|
|
|
struct rb_root *root_node = &nfsi->access_cache;
|
2006-07-25 23:28:18 +08:00
|
|
|
struct rb_node **p = &root_node->rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct nfs_access_entry *entry;
|
2018-12-03 08:30:30 +08:00
|
|
|
int cmp;
|
2006-07-25 23:28:18 +08:00
|
|
|
|
|
|
|
spin_lock(&inode->i_lock);
|
|
|
|
while (*p != NULL) {
|
|
|
|
parent = *p;
|
|
|
|
entry = rb_entry(parent, struct nfs_access_entry, rb_node);
|
2021-09-28 07:47:57 +08:00
|
|
|
cmp = access_cmp(cred, entry);
|
2006-07-25 23:28:18 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
if (cmp < 0)
|
2006-07-25 23:28:18 +08:00
|
|
|
p = &parent->rb_left;
|
2018-12-03 08:30:30 +08:00
|
|
|
else if (cmp > 0)
|
2006-07-25 23:28:18 +08:00
|
|
|
p = &parent->rb_right;
|
|
|
|
else
|
|
|
|
goto found;
|
|
|
|
}
|
|
|
|
rb_link_node(&set->rb_node, parent, p);
|
|
|
|
rb_insert_color(&set->rb_node, root_node);
|
2006-07-25 23:28:18 +08:00
|
|
|
list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
|
2005-08-19 02:24:12 +08:00
|
|
|
spin_unlock(&inode->i_lock);
|
2006-07-25 23:28:18 +08:00
|
|
|
return;
|
|
|
|
found:
|
|
|
|
rb_replace_node(parent, &set->rb_node, root_node);
|
2006-07-25 23:28:18 +08:00
|
|
|
list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
|
|
|
|
list_del(&entry->lru);
|
2006-07-25 23:28:18 +08:00
|
|
|
spin_unlock(&inode->i_lock);
|
|
|
|
nfs_access_free_entry(entry);
|
|
|
|
}
|
|
|
|
|
2021-09-28 07:47:57 +08:00
|
|
|
/*
 * Cache the ACCESS result in @set for @cred on @inode.
 *
 * Allocates a new entry (silently dropping the result on allocation
 * failure — the cache is best-effort), publishes it in the per-inode
 * rbtree/LRU, updates the global entry count, puts the inode on the
 * global access-cache LRU if needed, and finally enforces the global
 * cache-size limit.
 */
void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set,
			  const struct cred *cred)
{
	struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
	if (cache == NULL)
		return;
	RB_CLEAR_NODE(&cache->rb_node);
	cache->fsuid = cred->fsuid;
	cache->fsgid = cred->fsgid;
	cache->group_info = get_group_info(cred->group_info);
	cache->mask = set->mask;
	cache->timestamp = ktime_get_ns();

	/* The above field assignments must be visible
	 * before this item appears on the lru. We cannot easily
	 * use rcu_assign_pointer, so just force the memory barrier.
	 */
	smp_wmb();
	nfs_access_add_rbtree(inode, cache, cred);

	/* Update accounting */
	smp_mb__before_atomic();
	atomic_long_inc(&nfs_access_nr_entries);
	smp_mb__after_atomic();

	/* Add inode to global LRU list */
	if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
		spin_lock(&nfs_access_lru_lock);
		/* Re-check under the lock to avoid a double add. */
		if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
			list_add_tail(&NFS_I(inode)->access_cache_inode_lru,
					&nfs_access_lru_list);
		spin_unlock(&nfs_access_lru_lock);
	}
	nfs_access_cache_enforce_limit();
}
EXPORT_SYMBOL_GPL(nfs_access_add_cache);
|
|
|
|
|
2017-07-26 22:14:55 +08:00
|
|
|
/*
 * Groups of NFS ACCESS result bits that must all be granted for the
 * corresponding VFS MAY_* permission; consumed by nfs_access_calc_mask().
 */
#define NFS_MAY_READ (NFS_ACCESS_READ)
#define NFS_MAY_WRITE (NFS_ACCESS_MODIFY | \
		NFS_ACCESS_EXTEND | \
		NFS_ACCESS_DELETE)
#define NFS_FILE_MAY_WRITE (NFS_ACCESS_MODIFY | \
		NFS_ACCESS_EXTEND)
#define NFS_DIR_MAY_WRITE NFS_MAY_WRITE
#define NFS_MAY_LOOKUP (NFS_ACCESS_LOOKUP)
#define NFS_MAY_EXECUTE (NFS_ACCESS_EXECUTE)
|
2017-07-12 05:54:32 +08:00
|
|
|
static int
|
2017-07-12 05:54:35 +08:00
|
|
|
nfs_access_calc_mask(u32 access_result, umode_t umode)
|
2017-07-12 05:54:32 +08:00
|
|
|
{
|
|
|
|
int mask = 0;
|
|
|
|
|
|
|
|
if (access_result & NFS_MAY_READ)
|
|
|
|
mask |= MAY_READ;
|
2017-07-12 05:54:35 +08:00
|
|
|
if (S_ISDIR(umode)) {
|
|
|
|
if ((access_result & NFS_DIR_MAY_WRITE) == NFS_DIR_MAY_WRITE)
|
|
|
|
mask |= MAY_WRITE;
|
|
|
|
if ((access_result & NFS_MAY_LOOKUP) == NFS_MAY_LOOKUP)
|
|
|
|
mask |= MAY_EXEC;
|
|
|
|
} else if (S_ISREG(umode)) {
|
|
|
|
if ((access_result & NFS_FILE_MAY_WRITE) == NFS_FILE_MAY_WRITE)
|
|
|
|
mask |= MAY_WRITE;
|
|
|
|
if ((access_result & NFS_MAY_EXECUTE) == NFS_MAY_EXECUTE)
|
|
|
|
mask |= MAY_EXEC;
|
|
|
|
} else if (access_result & NFS_MAY_WRITE)
|
|
|
|
mask |= MAY_WRITE;
|
2017-07-12 05:54:32 +08:00
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
2012-09-11 02:00:46 +08:00
|
|
|
/* Store the raw ACCESS result bits from the server into @entry. */
void nfs_access_set_mask(struct nfs_access_entry *entry, u32 access_result)
{
	entry->mask = access_result;
}
EXPORT_SYMBOL_GPL(nfs_access_set_mask);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
/*
 * Check whether @cred may access @inode with the VFS permission @mask.
 *
 * First consults the access cache; on a miss (and when blocking is
 * allowed) issues an ACCESS RPC requesting the full set of bits for
 * this file type, caches the result, and then compares the computed
 * MAY_* mask against the request. Returns 0 on success, -EACCES when
 * permission is denied, -ECHILD when an RPC would be needed but
 * MAY_NOT_BLOCK is set, or the RPC error.
 */
static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
{
	struct nfs_access_entry cache;
	bool may_block = (mask & MAY_NOT_BLOCK) == 0;
	int cache_mask = -1;
	int status;

	trace_nfs_access_enter(inode);

	status = nfs_access_get_cached(inode, cred, &cache.mask, may_block);
	if (status == 0)
		goto out_cached;

	status = -ECHILD;
	if (!may_block)
		goto out;

	/*
	 * Determine which access bits we want to ask for...
	 */
	cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND |
		     nfs_access_xattr_mask(NFS_SERVER(inode));
	if (S_ISDIR(inode->i_mode))
		cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
	else
		cache.mask |= NFS_ACCESS_EXECUTE;
	status = NFS_PROTO(inode)->access(inode, &cache, cred);
	if (status != 0) {
		if (status == -ESTALE) {
			/* Stale regular files are dead; stale directories
			 * may still revalidate, so only zap their caches. */
			if (!S_ISDIR(inode->i_mode))
				nfs_set_inode_stale(inode);
			else
				nfs_zap_caches(inode);
		}
		goto out;
	}
	nfs_access_add_cache(inode, &cache, cred);
out_cached:
	cache_mask = nfs_access_calc_mask(cache.mask, inode->i_mode);
	/* Deny if any requested rwx bit is missing from the granted set. */
	if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0)
		status = -EACCES;
out:
	trace_nfs_access_exit(inode, mask, cache_mask, status);
	return status;
}
|
|
|
|
|
2007-08-11 05:45:10 +08:00
|
|
|
static int nfs_open_permission_mask(int openflags)
|
|
|
|
{
|
|
|
|
int mask = 0;
|
|
|
|
|
2013-01-04 05:42:29 +08:00
|
|
|
if (openflags & __FMODE_EXEC) {
|
|
|
|
/* ONLY check exec rights */
|
|
|
|
mask = MAY_EXEC;
|
|
|
|
} else {
|
|
|
|
if ((openflags & O_ACCMODE) != O_WRONLY)
|
|
|
|
mask |= MAY_READ;
|
|
|
|
if ((openflags & O_ACCMODE) != O_RDONLY)
|
|
|
|
mask |= MAY_WRITE;
|
|
|
|
}
|
|
|
|
|
2007-08-11 05:45:10 +08:00
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
2018-12-03 08:30:30 +08:00
|
|
|
/*
 * Check whether @cred may open @inode with flags @openflags; translates
 * the open flags to a MAY_* mask and defers to nfs_do_access().
 */
int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags)
{
	return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
}
EXPORT_SYMBOL_GPL(nfs_may_open);
|
2007-08-11 05:45:10 +08:00
|
|
|
|
2015-12-29 08:30:05 +08:00
|
|
|
static int nfs_execute_ok(struct inode *inode, int mask)
|
|
|
|
{
|
|
|
|
struct nfs_server *server = NFS_SERVER(inode);
|
2016-12-17 07:40:03 +08:00
|
|
|
int ret = 0;
|
2015-12-29 08:30:05 +08:00
|
|
|
|
2018-07-25 02:27:11 +08:00
|
|
|
if (S_ISDIR(inode->i_mode))
|
|
|
|
return 0;
|
2021-04-13 21:41:16 +08:00
|
|
|
if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_MODE)) {
|
2016-12-17 07:40:03 +08:00
|
|
|
if (mask & MAY_NOT_BLOCK)
|
|
|
|
return -ECHILD;
|
|
|
|
ret = __nfs_revalidate_inode(server, inode);
|
|
|
|
}
|
2015-12-29 08:30:05 +08:00
|
|
|
if (ret == 0 && !execute_ok(inode))
|
|
|
|
ret = -EACCES;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2023-01-13 19:49:22 +08:00
|
|
|
/*
 * VFS ->permission() entry point for NFS.
 *
 * Short-circuits requests that need no server check (non-rwx masks,
 * symlinks, atomic-open regular files, pure writes to directories),
 * otherwise performs an ACCESS check via nfs_do_access(). When the
 * protocol has no ACCESS op, falls back to revalidating the inode and
 * using generic_permission() on the cached mode bits. A successful
 * MAY_EXEC request is additionally vetted by nfs_execute_ok().
 */
int nfs_permission(struct mnt_idmap *idmap,
		   struct inode *inode,
		   int mask)
{
	const struct cred *cred = current_cred();
	int res = 0;

	nfs_inc_stats(inode, NFSIOS_VFSACCESS);

	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
		goto out;
	/* Is this sys_access() ? */
	if (mask & (MAY_ACCESS | MAY_CHDIR))
		goto force_lookup;

	switch (inode->i_mode & S_IFMT) {
	case S_IFLNK:
		goto out;
	case S_IFREG:
		/* Atomic open will do its own permission check. */
		if ((mask & MAY_OPEN) &&
		   nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN))
			return 0;
		break;
	case S_IFDIR:
		/*
		 * Optimize away all write operations, since the server
		 * will check permissions when we perform the op.
		 */
		if ((mask & MAY_WRITE) && !(mask & MAY_READ))
			goto out;
	}

force_lookup:
	if (!NFS_PROTO(inode)->access)
		goto out_notsup;

	res = nfs_do_access(inode, cred, mask);
out:
	if (!res && (mask & MAY_EXEC))
		res = nfs_execute_ok(inode, mask);

	dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n",
		inode->i_sb->s_id, inode->i_ino, mask, res);
	return res;
out_notsup:
	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	/* No ACCESS op: refresh the mode/ownership and decide locally. */
	res = nfs_revalidate_inode(inode, NFS_INO_INVALID_MODE |
						  NFS_INO_INVALID_OTHER);
	if (res == 0)
		res = generic_permission(&nop_mnt_idmap, inode, mask);
	goto out;
}
EXPORT_SYMBOL_GPL(nfs_permission);
|