OpenCloudOS-Kernel/fs/debugfs/inode.c

914 lines
26 KiB
C
Raw Normal View History

// SPDX-License-Identifier: GPL-2.0
/*
* inode.c - part of debugfs, a tiny little debug file system
*
* Copyright (C) 2004,2019 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2004 IBM Inc.
* Copyright (C) 2019 Linux Foundation <gregkh@linuxfoundation.org>
*
* debugfs is for people to use instead of /proc or /sys.
* See ./Documentation/core-api/kernel-api.rst for more details.
*/
#define pr_fmt(fmt) "debugfs: " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/namei.h>
#include <linux/debugfs.h>
#include <linux/fsnotify.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/magic.h>
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h percpu.h is included by sched.h and module.h and thus ends up being included when building most .c files. percpu.h includes slab.h which in turn includes gfp.h making everything defined by the two files universally available and complicating inclusion dependencies. percpu.h -> slab.h dependency is about to be removed. Prepare for this change by updating users of gfp and slab facilities include those headers directly instead of assuming availability. As this conversion needs to touch large number of source files, the following script is used as the basis of conversion. http://userweb.kernel.org/~tj/misc/slabh-sweep.py The script does the followings. * Scan files for gfp and slab usages and update includes such that only the necessary includes are there. ie. if only gfp is used, gfp.h, if slab is used, slab.h. * When the script inserts a new include, it looks at the include blocks and try to put the new include such that its order conforms to its surrounding. It's put in the include block which contains core kernel includes, in the same order that the rest are ordered - alphabetical, Christmas tree, rev-Xmas-tree or at the end if there doesn't seem to be any matching order. * If the script can't find a place to put a new include (mostly because the file doesn't have fitting include block), it prints out an error message indicating which .h file needs to be added to the file. The conversion was done in the following steps. 1. The initial automatic conversion of all .c files updated slightly over 4000 files, deleting around 700 includes and adding ~480 gfp.h and ~3000 slab.h inclusions. The script emitted errors for ~400 files. 2. Each error was manually checked. Some didn't need the inclusion, some needed manual addition while adding it to implementation .h or embedding .c file was more appropriate for others. This step added inclusions to around 150 files. 3. The script was run again and the output was compared to the edits from #2 to make sure no file was left behind. 4. Several build tests were done and a couple of problems were fixed. e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs requiring slab.h to be added manually. 5. The script was run on all .h files but without automatically editing them as sprinkling gfp.h and slab.h inclusions around .h files could easily lead to inclusion dependency hell. Most gfp.h inclusion directives were ignored as stuff from gfp.h was usually wildly available and often used in preprocessor macros. Each slab.h inclusion directive was examined and added manually as necessary. 6. percpu.h was updated not to include slab.h. 7. Build test were done on the following configurations and failures were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my distributed build env didn't work with gcov compiles) and a few more options had to be turned off depending on archs to make things build (like ipr on powerpc/64 which failed due to missing writeq). * x86 and x86_64 UP and SMP allmodconfig and a custom test config. * powerpc and powerpc64 SMP allmodconfig * sparc and sparc64 SMP allmodconfig * ia64 SMP allmodconfig * s390 SMP allmodconfig * alpha SMP allmodconfig * um on x86_64 SMP allmodconfig 8. percpu.h modifications were reverted so that it could be applied as a separate patch and serve as bisection point. Given the fact that I had only a couple of failures from tests on step 6, I'm fairly confident about the coverage of this conversion patch. If there is a breakage, it's likely to be something in one of the arch headers which should be easily discoverable easily on most builds of the specific arch. Signed-off-by: Tejun Heo <tj@kernel.org> Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
#include <linux/slab.h>
debugfs: Restrict debugfs when the kernel is locked down Disallow opening of debugfs files that might be used to muck around when the kernel is locked down as various drivers give raw access to hardware through debugfs. Given the effort of auditing all 2000 or so files and manually fixing each one as necessary, I've chosen to apply a heuristic instead. The following changes are made: (1) chmod and chown are disallowed on debugfs objects (though the root dir can be modified by mount and remount, but I'm not worried about that). (2) When the kernel is locked down, only files with the following criteria are permitted to be opened: - The file must have mode 00444 - The file must not have ioctl methods - The file must not have mmap (3) When the kernel is locked down, files may only be opened for reading. Normal device interaction should be done through configfs, sysfs or a miscdev, not debugfs. Note that this makes it unnecessary to specifically lock down show_dsts(), show_devs() and show_call() in the asus-wmi driver. I would actually prefer to lock down all files by default and have the the files unlocked by the creator. This is tricky to manage correctly, though, as there are 19 creation functions and ~1600 call sites (some of them in loops scanning tables). Signed-off-by: David Howells <dhowells@redhat.com> cc: Andy Shevchenko <andy.shevchenko@gmail.com> cc: acpi4asus-user@lists.sourceforge.net cc: platform-driver-x86@vger.kernel.org cc: Matthew Garrett <mjg59@srcf.ucam.org> cc: Thomas Gleixner <tglx@linutronix.de> Cc: Greg KH <greg@kroah.com> Cc: Rafael J. Wysocki <rafael@kernel.org> Signed-off-by: Matthew Garrett <matthewgarrett@google.com> Signed-off-by: James Morris <jmorris@namei.org>
2019-08-20 08:18:02 +08:00
#include <linux/security.h>
debugfs: prevent access to possibly dead file_operations at file open Nothing prevents a dentry found by path lookup before a return of __debugfs_remove() to actually get opened after that return. Now, after the return of __debugfs_remove(), there are no guarantees whatsoever regarding the memory the corresponding inode's file_operations object had been kept in. Since __debugfs_remove() is seldomly invoked, usually from module exit handlers only, the race is hard to trigger and the impact is very low. A discussion of the problem outlined above as well as a suggested solution can be found in the (sub-)thread rooted at http://lkml.kernel.org/g/20130401203445.GA20862@ZenIV.linux.org.uk ("Yet another pipe related oops.") Basically, Greg KH suggests to introduce an intermediate fops and Al Viro points out that a pointer to the original ones may be stored in ->d_fsdata. Follow this line of reasoning: - Add SRCU as a reverse dependency of DEBUG_FS. - Introduce a srcu_struct object for the debugfs subsystem. - In debugfs_create_file(), store a pointer to the original file_operations object in ->d_fsdata. - Make debugfs_remove() and debugfs_remove_recursive() wait for a SRCU grace period after the dentry has been delete()'d and before they return to their callers. - Introduce an intermediate file_operations object named "debugfs_open_proxy_file_operations". It's ->open() functions checks, under the protection of a SRCU read lock, whether the dentry is still alive, i.e. has not been d_delete()'d and if so, tries to acquire a reference on the owning module. On success, it sets the file object's ->f_op to the original file_operations and forwards the ongoing open() call to the original ->open(). - For clarity, rename the former debugfs_file_operations to debugfs_noop_file_operations -- they are in no way canonical. The choice of SRCU over "normal" RCU is justified by the fact, that the former may also be used to protect ->i_private data from going away during the execution of a file's readers and writers which may (and do) sleep. Finally, introduce the fs/debugfs/internal.h header containing some declarations internal to the debugfs implementation. Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:13 +08:00
#include "internal.h"
#define DEBUGFS_DEFAULT_MODE 0700
static struct vfsmount *debugfs_mount;
static int debugfs_mount_count;
static bool debugfs_registered;
debugfs: Restrict debugfs when the kernel is locked down Disallow opening of debugfs files that might be used to muck around when the kernel is locked down as various drivers give raw access to hardware through debugfs. Given the effort of auditing all 2000 or so files and manually fixing each one as necessary, I've chosen to apply a heuristic instead. The following changes are made: (1) chmod and chown are disallowed on debugfs objects (though the root dir can be modified by mount and remount, but I'm not worried about that). (2) When the kernel is locked down, only files with the following criteria are permitted to be opened: - The file must have mode 00444 - The file must not have ioctl methods - The file must not have mmap (3) When the kernel is locked down, files may only be opened for reading. Normal device interaction should be done through configfs, sysfs or a miscdev, not debugfs. Note that this makes it unnecessary to specifically lock down show_dsts(), show_devs() and show_call() in the asus-wmi driver. I would actually prefer to lock down all files by default and have the the files unlocked by the creator. This is tricky to manage correctly, though, as there are 19 creation functions and ~1600 call sites (some of them in loops scanning tables). Signed-off-by: David Howells <dhowells@redhat.com> cc: Andy Shevchenko <andy.shevchenko@gmail.com> cc: acpi4asus-user@lists.sourceforge.net cc: platform-driver-x86@vger.kernel.org cc: Matthew Garrett <mjg59@srcf.ucam.org> cc: Thomas Gleixner <tglx@linutronix.de> Cc: Greg KH <greg@kroah.com> Cc: Rafael J. Wysocki <rafael@kernel.org> Signed-off-by: Matthew Garrett <matthewgarrett@google.com> Signed-off-by: James Morris <jmorris@namei.org>
2019-08-20 08:18:02 +08:00
/*
* Don't allow access attributes to be changed whilst the kernel is locked down
* so that we can use the file mode as part of a heuristic to determine whether
* to lock down individual files.
*/
static int debugfs_setattr(struct dentry *dentry, struct iattr *ia)
{
int ret = security_locked_down(LOCKDOWN_DEBUGFS);
if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
return ret;
return simple_setattr(dentry, ia);
}
static const struct inode_operations debugfs_file_inode_operations = {
.setattr = debugfs_setattr,
};
static const struct inode_operations debugfs_dir_inode_operations = {
.lookup = simple_lookup,
.setattr = debugfs_setattr,
};
static const struct inode_operations debugfs_symlink_inode_operations = {
.get_link = simple_get_link,
.setattr = debugfs_setattr,
};
static struct inode *debugfs_get_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
inode->i_atime = inode->i_mtime =
inode->i_ctime = current_time(inode);
}
return inode;
}
struct debugfs_mount_opts {
kuid_t uid;
kgid_t gid;
umode_t mode;
};
enum {
Opt_uid,
Opt_gid,
Opt_mode,
Opt_err
};
static const match_table_t tokens = {
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_mode, "mode=%o"},
{Opt_err, NULL}
};
struct debugfs_fs_info {
struct debugfs_mount_opts mount_opts;
};
static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
{
substring_t args[MAX_OPT_ARGS];
int option;
int token;
kuid_t uid;
kgid_t gid;
char *p;
opts->mode = DEBUGFS_DEFAULT_MODE;
while ((p = strsep(&data, ",")) != NULL) {
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_uid:
if (match_int(&args[0], &option))
return -EINVAL;
uid = make_kuid(current_user_ns(), option);
if (!uid_valid(uid))
return -EINVAL;
opts->uid = uid;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return -EINVAL;
gid = make_kgid(current_user_ns(), option);
if (!gid_valid(gid))
return -EINVAL;
opts->gid = gid;
break;
case Opt_mode:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->mode = option & S_IALLUGO;
break;
/*
* We might like to report bad mount options here;
* but traditionally debugfs has ignored all mount options
*/
}
}
return 0;
}
static int debugfs_apply_options(struct super_block *sb)
{
struct debugfs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = d_inode(sb->s_root);
struct debugfs_mount_opts *opts = &fsi->mount_opts;
inode->i_mode &= ~S_IALLUGO;
inode->i_mode |= opts->mode;
inode->i_uid = opts->uid;
inode->i_gid = opts->gid;
return 0;
}
static int debugfs_remount(struct super_block *sb, int *flags, char *data)
{
int err;
struct debugfs_fs_info *fsi = sb->s_fs_info;
fs: push sync_filesystem() down to the file system's remount_fs() Previously, the no-op "mount -o mount /dev/xxx" operation when the file system is already mounted read-write causes an implied, unconditional syncfs(). This seems pretty stupid, and it's certainly documented or guaraunteed to do this, nor is it particularly useful, except in the case where the file system was mounted rw and is getting remounted read-only. However, it's possible that there might be some file systems that are actually depending on this behavior. In most file systems, it's probably fine to only call sync_filesystem() when transitioning from read-write to read-only, and there are some file systems where this is not needed at all (for example, for a pseudo-filesystem or something like romfs). Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> Cc: linux-fsdevel@vger.kernel.org Cc: Christoph Hellwig <hch@infradead.org> Cc: Artem Bityutskiy <dedekind1@gmail.com> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Evgeniy Dushistov <dushistov@mail.ru> Cc: Jan Kara <jack@suse.cz> Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp> Cc: Anders Larsen <al@alarsen.net> Cc: Phillip Lougher <phillip@squashfs.org.uk> Cc: Kees Cook <keescook@chromium.org> Cc: Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> Cc: Petr Vandrovec <petr@vandrovec.name> Cc: xfs@oss.sgi.com Cc: linux-btrfs@vger.kernel.org Cc: linux-cifs@vger.kernel.org Cc: samba-technical@lists.samba.org Cc: codalist@coda.cs.cmu.edu Cc: linux-ext4@vger.kernel.org Cc: linux-f2fs-devel@lists.sourceforge.net Cc: fuse-devel@lists.sourceforge.net Cc: cluster-devel@redhat.com Cc: linux-mtd@lists.infradead.org Cc: jfs-discussion@lists.sourceforge.net Cc: linux-nfs@vger.kernel.org Cc: linux-nilfs@vger.kernel.org Cc: linux-ntfs-dev@lists.sourceforge.net Cc: ocfs2-devel@oss.oracle.com Cc: reiserfs-devel@vger.kernel.org
2014-03-13 22:14:33 +08:00
sync_filesystem(sb);
err = debugfs_parse_options(data, &fsi->mount_opts);
if (err)
goto fail;
debugfs_apply_options(sb);
fail:
return err;
}
static int debugfs_show_options(struct seq_file *m, struct dentry *root)
{
struct debugfs_fs_info *fsi = root->d_sb->s_fs_info;
struct debugfs_mount_opts *opts = &fsi->mount_opts;
if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
from_kuid_munged(&init_user_ns, opts->uid));
if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
seq_printf(m, ",gid=%u",
from_kgid_munged(&init_user_ns, opts->gid));
if (opts->mode != DEBUGFS_DEFAULT_MODE)
seq_printf(m, ",mode=%o", opts->mode);
return 0;
}
static void debugfs_free_inode(struct inode *inode)
{
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
free_inode_nonrcu(inode);
}
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
.remount_fs = debugfs_remount,
.show_options = debugfs_show_options,
.free_inode = debugfs_free_inode,
};
static void debugfs_release_dentry(struct dentry *dentry)
{
debugfs: defer debugfs_fsdata allocation to first usage Currently, __debugfs_create_file allocates one struct debugfs_fsdata instance for every file created. However, there are potentially many debugfs file around, most of which are never touched by userspace. Thus, defer the allocations to the first usage, i.e. to the first debugfs_file_get(). A dentry's ->d_fsdata starts out to point to the "real", user provided fops. After a debugfs_fsdata instance has been allocated (and the real fops pointer has been moved over into its ->real_fops member), ->d_fsdata is changed to point to it from then on. The two cases are distinguished by setting BIT(0) for the real fops case. struct debugfs_fsdata's foremost purpose is to track active users and to make debugfs_remove() block until they are done. Since no debugfs_fsdata instance means no active users, make debugfs_remove() return immediately in this case. Take care of possible races between debugfs_file_get() and debugfs_remove(): either debugfs_remove() must see a debugfs_fsdata instance and thus wait for possible active users or debugfs_file_get() must see a dead dentry and return immediately. Make a dentry's ->d_release(), i.e. debugfs_release_dentry(), check whether ->d_fsdata is actually a debugfs_fsdata instance before kfree()ing it. Similarly, make debugfs_real_fops() check whether ->d_fsdata is actually a debugfs_fsdata instance before returning it, otherwise emit a warning. The set of possible error codes returned from debugfs_file_get() has grown from -EIO to -EIO and -ENOMEM. Make open_proxy_open() and full_proxy_open() pass the -ENOMEM onwards to their callers. Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-10-31 07:15:54 +08:00
void *fsd = dentry->d_fsdata;
if (!((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))
kfree(dentry->d_fsdata);
}
static struct vfsmount *debugfs_automount(struct path *path)
{
fs: Better permission checking for submounts To support unprivileged users mounting filesystems two permission checks have to be performed: a test to see if the user allowed to create a mount in the mount namespace, and a test to see if the user is allowed to access the specified filesystem. The automount case is special in that mounting the original filesystem grants permission to mount the sub-filesystems, to any user who happens to stumble across the their mountpoint and satisfies the ordinary filesystem permission checks. Attempting to handle the automount case by using override_creds almost works. It preserves the idea that permission to mount the original filesystem is permission to mount the sub-filesystem. Unfortunately using override_creds messes up the filesystems ordinary permission checks. Solve this by being explicit that a mount is a submount by introducing vfs_submount, and using it where appropriate. vfs_submount uses a new mount internal mount flags MS_SUBMOUNT, to let sget and friends know that a mount is a submount so they can take appropriate action. sget and sget_userns are modified to not perform any permission checks on submounts. follow_automount is modified to stop using override_creds as that has proven problemantic. do_mount is modified to always remove the new MS_SUBMOUNT flag so that we know userspace will never by able to specify it. autofs4 is modified to stop using current_real_cred that was put in there to handle the previous version of submount permission checking. cifs is modified to pass the mountpoint all of the way down to vfs_submount. debugfs is modified to pass the mountpoint all of the way down to trace_automount by adding a new parameter. To make this change easier a new typedef debugfs_automount_t is introduced to capture the type of the debugfs automount function. Cc: stable@vger.kernel.org Fixes: 069d5ac9ae0d ("autofs: Fix automounts by using current_real_cred()->uid") Fixes: aeaa4a79ff6a ("fs: Call d_automount with the filesystems creds") Reviewed-by: Trond Myklebust <trond.myklebust@primarydata.com> Reviewed-by: Seth Forshee <seth.forshee@canonical.com> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
2017-02-01 01:06:16 +08:00
debugfs_automount_t f;
f = (debugfs_automount_t)path->dentry->d_fsdata;
return f(path->dentry, d_inode(path->dentry)->i_private);
}
static const struct dentry_operations debugfs_dops = {
.d_delete = always_delete_dentry,
.d_release = debugfs_release_dentry,
.d_automount = debugfs_automount,
};
static int debug_fill_super(struct super_block *sb, void *data, int silent)
{
static const struct tree_descr debug_files[] = {{""}};
struct debugfs_fs_info *fsi;
int err;
fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL);
sb->s_fs_info = fsi;
if (!fsi) {
err = -ENOMEM;
goto fail;
}
err = debugfs_parse_options(data, &fsi->mount_opts);
if (err)
goto fail;
err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
if (err)
goto fail;
sb->s_op = &debugfs_super_operations;
sb->s_d_op = &debugfs_dops;
debugfs_apply_options(sb);
return 0;
fail:
kfree(fsi);
sb->s_fs_info = NULL;
return err;
}
static struct dentry *debug_mount(struct file_system_type *fs_type,
[PATCH] VFS: Permit filesystem to override root dentry on mount Extend the get_sb() filesystem operation to take an extra argument that permits the VFS to pass in the target vfsmount that defines the mountpoint. The filesystem is then required to manually set the superblock and root dentry pointers. For most filesystems, this should be done with simple_set_mnt() which will set the superblock pointer and then set the root dentry to the superblock's s_root (as per the old default behaviour). The get_sb() op now returns an integer as there's now no need to return the superblock pointer. This patch permits a superblock to be implicitly shared amongst several mount points, such as can be done with NFS to avoid potential inode aliasing. In such a case, simple_set_mnt() would not be called, and instead the mnt_root and mnt_sb would be set directly. The patch also makes the following changes: (*) the get_sb_*() convenience functions in the core kernel now take a vfsmount pointer argument and return an integer, so most filesystems have to change very little. (*) If one of the convenience function is not used, then get_sb() should normally call simple_set_mnt() to instantiate the vfsmount. This will always return 0, and so can be tail-called from get_sb(). (*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the dcache upon superblock destruction rather than shrink_dcache_anon(). This is required because the superblock may now have multiple trees that aren't actually bound to s_root, but that still need to be cleaned up. The currently called functions assume that the whole tree is rooted at s_root, and that anonymous dentries are not the roots of trees which results in dentries being left unculled. However, with the way NFS superblock sharing are currently set to be implemented, these assumptions are violated: the root of the filesystem is simply a dummy dentry and inode (the real inode for '/' may well be inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries with child trees. [*] Anonymous until discovered from another tree. (*) The documentation has been adjusted, including the additional bit of changing ext2_* into foo_* in the documentation. [akpm@osdl.org: convert ipath_fs, do other stuff] Signed-off-by: David Howells <dhowells@redhat.com> Acked-by: Al Viro <viro@zeniv.linux.org.uk> Cc: Nathan Scott <nathans@sgi.com> Cc: Roland Dreier <rolandd@cisco.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 17:02:57 +08:00
int flags, const char *dev_name,
void *data)
{
return mount_single(fs_type, flags, data, debug_fill_super);
}
static struct file_system_type debug_fs_type = {
.owner = THIS_MODULE,
.name = "debugfs",
.mount = debug_mount,
.kill_sb = kill_litter_super,
};
fs: Limit sys_mount to only request filesystem modules. Modify the request_module to prefix the file system type with "fs-" and add aliases to all of the filesystems that can be built as modules to match. A common practice is to build all of the kernel code and leave code that is not commonly needed as modules, with the result that many users are exposed to any bug anywhere in the kernel. Looking for filesystems with a fs- prefix limits the pool of possible modules that can be loaded by mount to just filesystems trivially making things safer with no real cost. Using aliases means user space can control the policy of which filesystem modules are auto-loaded by editing /etc/modprobe.d/*.conf with blacklist and alias directives. Allowing simple, safe, well understood work-arounds to known problematic software. This also addresses a rare but unfortunate problem where the filesystem name is not the same as it's module name and module auto-loading would not work. While writing this patch I saw a handful of such cases. The most significant being autofs that lives in the module autofs4. This is relevant to user namespaces because we can reach the request module in get_fs_type() without having any special permissions, and people get uncomfortable when a user specified string (in this case the filesystem type) goes all of the way to request_module. After having looked at this issue I don't think there is any particular reason to perform any filtering or permission checks beyond making it clear in the module request that we want a filesystem module. The common pattern in the kernel is to call request_module() without regards to the users permissions. In general all a filesystem module does once loaded is call register_filesystem() and go to sleep. Which means there is not much attack surface exposed by loading a filesytem module unless the filesystem is mounted. In a user namespace filesystems are not mounted unless .fs_flags = FS_USERNS_MOUNT, which most filesystems do not set today. Acked-by: Serge Hallyn <serge.hallyn@canonical.com> Acked-by: Kees Cook <keescook@chromium.org> Reported-by: Kees Cook <keescook@google.com> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
2013-03-03 11:39:14 +08:00
MODULE_ALIAS_FS("debugfs");
/**
* debugfs_lookup() - look up an existing debugfs file
* @name: a pointer to a string containing the name of the file to look up.
* @parent: a pointer to the parent dentry of the file.
*
* This function will return a pointer to a dentry if it succeeds. If the file
* doesn't exist or an error occurs, %NULL will be returned. The returned
* dentry must be passed to dput() when it is no longer needed.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
{
struct dentry *dentry;
if (IS_ERR(parent))
return NULL;
if (!parent)
parent = debugfs_mount->mnt_root;
dentry = lookup_one_len_unlocked(name, parent, strlen(name));
if (IS_ERR(dentry))
return NULL;
if (!d_really_is_positive(dentry)) {
dput(dentry);
return NULL;
}
return dentry;
}
EXPORT_SYMBOL_GPL(debugfs_lookup);
static struct dentry *start_creating(const char *name, struct dentry *parent)
{
struct dentry *dentry;
int error;
pr_debug("creating file '%s'\n", name);
if (IS_ERR(parent))
return parent;
error = simple_pin_fs(&debug_fs_type, &debugfs_mount,
&debugfs_mount_count);
if (error) {
pr_err("Unable to pin filesystem for file '%s'\n", name);
return ERR_PTR(error);
}
/* If the parent is not specified, we create it in the root.
* We need the root dentry to do this, which is in the super
* block. A pointer to that is in the struct vfsmount that we
* have around.
*/
if (!parent)
parent = debugfs_mount->mnt_root;
inode_lock(d_inode(parent));
dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry) && d_really_is_positive(dentry)) {
if (d_is_dir(dentry))
pr_err("Directory '%s' with parent '%s' already present!\n",
name, parent->d_name.name);
else
pr_err("File '%s' in directory '%s' already present!\n",
name, parent->d_name.name);
dput(dentry);
dentry = ERR_PTR(-EEXIST);
}
if (IS_ERR(dentry)) {
inode_unlock(d_inode(parent));
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
return dentry;
}
static struct dentry *failed_creating(struct dentry *dentry)
{
inode_unlock(d_inode(dentry->d_parent));
dput(dentry);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
return ERR_PTR(-ENOMEM);
}
static struct dentry *end_creating(struct dentry *dentry)
{
inode_unlock(d_inode(dentry->d_parent));
return dentry;
}
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *proxy_fops,
const struct file_operations *real_fops)
{
struct dentry *dentry;
struct inode *inode;
if (!(mode & S_IFMT))
mode |= S_IFREG;
BUG_ON(!S_ISREG(mode));
dentry = start_creating(name, parent);
debugfs: defer debugfs_fsdata allocation to first usage Currently, __debugfs_create_file allocates one struct debugfs_fsdata instance for every file created. However, there are potentially many debugfs file around, most of which are never touched by userspace. Thus, defer the allocations to the first usage, i.e. to the first debugfs_file_get(). A dentry's ->d_fsdata starts out to point to the "real", user provided fops. After a debugfs_fsdata instance has been allocated (and the real fops pointer has been moved over into its ->real_fops member), ->d_fsdata is changed to point to it from then on. The two cases are distinguished by setting BIT(0) for the real fops case. struct debugfs_fsdata's foremost purpose is to track active users and to make debugfs_remove() block until they are done. Since no debugfs_fsdata instance means no active users, make debugfs_remove() return immediately in this case. Take care of possible races between debugfs_file_get() and debugfs_remove(): either debugfs_remove() must see a debugfs_fsdata instance and thus wait for possible active users or debugfs_file_get() must see a dead dentry and return immediately. Make a dentry's ->d_release(), i.e. debugfs_release_dentry(), check whether ->d_fsdata is actually a debugfs_fsdata instance before kfree()ing it. Similarly, make debugfs_real_fops() check whether ->d_fsdata is actually a debugfs_fsdata instance before returning it, otherwise emit a warning. The set of possible error codes returned from debugfs_file_get() has grown from -EIO to -EIO and -ENOMEM. Make open_proxy_open() and full_proxy_open() pass the -ENOMEM onwards to their callers. Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-10-31 07:15:54 +08:00
if (IS_ERR(dentry))
return dentry;
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create file '%s'\n",
name);
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
return failed_creating(dentry);
}
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
inode->i_mode = mode;
inode->i_private = data;
debugfs: Restrict debugfs when the kernel is locked down Disallow opening of debugfs files that might be used to muck around when the kernel is locked down as various drivers give raw access to hardware through debugfs. Given the effort of auditing all 2000 or so files and manually fixing each one as necessary, I've chosen to apply a heuristic instead. The following changes are made: (1) chmod and chown are disallowed on debugfs objects (though the root dir can be modified by mount and remount, but I'm not worried about that). (2) When the kernel is locked down, only files with the following criteria are permitted to be opened: - The file must have mode 00444 - The file must not have ioctl methods - The file must not have mmap (3) When the kernel is locked down, files may only be opened for reading. Normal device interaction should be done through configfs, sysfs or a miscdev, not debugfs. Note that this makes it unnecessary to specifically lock down show_dsts(), show_devs() and show_call() in the asus-wmi driver. I would actually prefer to lock down all files by default and have the the files unlocked by the creator. This is tricky to manage correctly, though, as there are 19 creation functions and ~1600 call sites (some of them in loops scanning tables). Signed-off-by: David Howells <dhowells@redhat.com> cc: Andy Shevchenko <andy.shevchenko@gmail.com> cc: acpi4asus-user@lists.sourceforge.net cc: platform-driver-x86@vger.kernel.org cc: Matthew Garrett <mjg59@srcf.ucam.org> cc: Thomas Gleixner <tglx@linutronix.de> Cc: Greg KH <greg@kroah.com> Cc: Rafael J. Wysocki <rafael@kernel.org> Signed-off-by: Matthew Garrett <matthewgarrett@google.com> Signed-off-by: James Morris <jmorris@namei.org>
2019-08-20 08:18:02 +08:00
inode->i_op = &debugfs_file_inode_operations;
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
inode->i_fop = proxy_fops;
debugfs: defer debugfs_fsdata allocation to first usage Currently, __debugfs_create_file allocates one struct debugfs_fsdata instance for every file created. However, there are potentially many debugfs file around, most of which are never touched by userspace. Thus, defer the allocations to the first usage, i.e. to the first debugfs_file_get(). A dentry's ->d_fsdata starts out to point to the "real", user provided fops. After a debugfs_fsdata instance has been allocated (and the real fops pointer has been moved over into its ->real_fops member), ->d_fsdata is changed to point to it from then on. The two cases are distinguished by setting BIT(0) for the real fops case. struct debugfs_fsdata's foremost purpose is to track active users and to make debugfs_remove() block until they are done. Since no debugfs_fsdata instance means no active users, make debugfs_remove() return immediately in this case. Take care of possible races between debugfs_file_get() and debugfs_remove(): either debugfs_remove() must see a debugfs_fsdata instance and thus wait for possible active users or debugfs_file_get() must see a dead dentry and return immediately. Make a dentry's ->d_release(), i.e. debugfs_release_dentry(), check whether ->d_fsdata is actually a debugfs_fsdata instance before kfree()ing it. Similarly, make debugfs_real_fops() check whether ->d_fsdata is actually a debugfs_fsdata instance before returning it, otherwise emit a warning. The set of possible error codes returned from debugfs_file_get() has grown from -EIO to -EIO and -ENOMEM. Make open_proxy_open() and full_proxy_open() pass the -ENOMEM onwards to their callers. Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-10-31 07:15:54 +08:00
dentry->d_fsdata = (void *)((unsigned long)real_fops |
DEBUGFS_FSDATA_IS_REAL_FOPS_BIT);
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
d_instantiate(dentry, inode);
fsnotify_create(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
/**
* debugfs_create_file - create a file in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @data: a pointer to something that the caller will want to get to later
* on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
*
* This is the basic "create a file" function for debugfs. It allows for a
* wide range of flexibility in creating a file, or a directory (if you want
* to create a directory, the debugfs_create_dir() function is
* recommended to be used instead.)
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
return __debugfs_create_file(name, mode, parent, data,
fops ? &debugfs_full_proxy_file_operations :
&debugfs_noop_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file);
debugfs: prevent access to possibly dead file_operations at file open Nothing prevents a dentry found by path lookup before a return of __debugfs_remove() to actually get opened after that return. Now, after the return of __debugfs_remove(), there are no guarantees whatsoever regarding the memory the corresponding inode's file_operations object had been kept in. Since __debugfs_remove() is seldomly invoked, usually from module exit handlers only, the race is hard to trigger and the impact is very low. A discussion of the problem outlined above as well as a suggested solution can be found in the (sub-)thread rooted at http://lkml.kernel.org/g/20130401203445.GA20862@ZenIV.linux.org.uk ("Yet another pipe related oops.") Basically, Greg KH suggests to introduce an intermediate fops and Al Viro points out that a pointer to the original ones may be stored in ->d_fsdata. Follow this line of reasoning: - Add SRCU as a reverse dependency of DEBUG_FS. - Introduce a srcu_struct object for the debugfs subsystem. - In debugfs_create_file(), store a pointer to the original file_operations object in ->d_fsdata. - Make debugfs_remove() and debugfs_remove_recursive() wait for a SRCU grace period after the dentry has been delete()'d and before they return to their callers. - Introduce an intermediate file_operations object named "debugfs_open_proxy_file_operations". It's ->open() functions checks, under the protection of a SRCU read lock, whether the dentry is still alive, i.e. has not been d_delete()'d and if so, tries to acquire a reference on the owning module. On success, it sets the file object's ->f_op to the original file_operations and forwards the ongoing open() call to the original ->open(). - For clarity, rename the former debugfs_file_operations to debugfs_noop_file_operations -- they are in no way canonical. The choice of SRCU over "normal" RCU is justified by the fact, that the former may also be used to protect ->i_private data from going away during the execution of a file's readers and writers which may (and do) sleep. Finally, introduce the fs/debugfs/internal.h header containing some declarations internal to the debugfs implementation. Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:13 +08:00
/**
* debugfs_create_file_unsafe - create a file in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @data: a pointer to something that the caller will want to get to later
* on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
*
* debugfs_create_file_unsafe() is completely analogous to
* debugfs_create_file(), the only difference being that the fops
* handed it will not get protected against file removals by the
* debugfs core.
*
* It is your responsibility to protect your struct file_operation
* methods against file removals by means of debugfs_file_get()
* and debugfs_file_put(). ->open() is still protected by
* debugfs though.
*
* Any struct file_operations defined by means of
* DEFINE_DEBUGFS_ATTRIBUTE() is protected against file removals and
* thus, may be used here.
*/
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
debugfs: prevent access to possibly dead file_operations at file open Nothing prevents a dentry found by path lookup before a return of __debugfs_remove() to actually get opened after that return. Now, after the return of __debugfs_remove(), there are no guarantees whatsoever regarding the memory the corresponding inode's file_operations object had been kept in. Since __debugfs_remove() is seldomly invoked, usually from module exit handlers only, the race is hard to trigger and the impact is very low. A discussion of the problem outlined above as well as a suggested solution can be found in the (sub-)thread rooted at http://lkml.kernel.org/g/20130401203445.GA20862@ZenIV.linux.org.uk ("Yet another pipe related oops.") Basically, Greg KH suggests to introduce an intermediate fops and Al Viro points out that a pointer to the original ones may be stored in ->d_fsdata. Follow this line of reasoning: - Add SRCU as a reverse dependency of DEBUG_FS. - Introduce a srcu_struct object for the debugfs subsystem. - In debugfs_create_file(), store a pointer to the original file_operations object in ->d_fsdata. - Make debugfs_remove() and debugfs_remove_recursive() wait for a SRCU grace period after the dentry has been delete()'d and before they return to their callers. - Introduce an intermediate file_operations object named "debugfs_open_proxy_file_operations". It's ->open() functions checks, under the protection of a SRCU read lock, whether the dentry is still alive, i.e. has not been d_delete()'d and if so, tries to acquire a reference on the owning module. On success, it sets the file object's ->f_op to the original file_operations and forwards the ongoing open() call to the original ->open(). - For clarity, rename the former debugfs_file_operations to debugfs_noop_file_operations -- they are in no way canonical. The choice of SRCU over "normal" RCU is justified by the fact, that the former may also be used to protect ->i_private data from going away during the execution of a file's readers and writers which may (and do) sleep. Finally, introduce the fs/debugfs/internal.h header containing some declarations internal to the debugfs implementation. Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:13 +08:00
debugfs: prevent access to removed files' private data Upon return of debugfs_remove()/debugfs_remove_recursive(), it might still be attempted to access associated private file data through previously opened struct file objects. If that data has been freed by the caller of debugfs_remove*() in the meanwhile, the reading/writing process would either encounter a fault or, if the memory address in question has been reassigned again, unrelated data structures could get overwritten. However, since debugfs files are seldomly removed, usually from module exit handlers only, the impact is very low. Currently, there are ~1000 call sites of debugfs_create_file() spread throughout the whole tree and touching all of those struct file_operations in order to make them file removal aware by means of checking the result of debugfs_use_file_start() from within their methods is unfeasible. Instead, wrap the struct file_operations by a lifetime managing proxy at file open: - In debugfs_create_file(), the original fops handed in has got stashed away in ->d_fsdata already. - In debugfs_create_file(), install a proxy file_operations factory, debugfs_full_proxy_file_operations, at ->i_fop. This proxy factory has got an ->open() method only. It carries out some lifetime checks and if successful, dynamically allocates and sets up a new struct file_operations proxy at ->f_op. Afterwards, it forwards to the ->open() of the original struct file_operations in ->d_fsdata, if any. The dynamically set up proxy at ->f_op has got a lifetime managing wrapper set for each of the methods defined in the original struct file_operations in ->d_fsdata. Its ->release()er frees the proxy again and forwards to the original ->release(), if any. In order not to mislead the VFS layer, it is strictly necessary to leave those fields blank in the proxy that have been NULL in the original struct file_operations also, i.e. aren't supported. This is why there is a need for dynamically allocated proxies. The choice made not to allocate a proxy instance for every dentry at file creation, but for every struct file object instantiated thereof is justified by the expected usage pattern of debugfs, namely that in general very few files get opened more than once at a time. The wrapper methods set in the struct file_operations implement lifetime managing by means of the SRCU protection facilities already in place for debugfs: They set up a SRCU read side critical section and check whether the dentry is still alive by means of debugfs_use_file_start(). If so, they forward the call to the original struct file_operation stored in ->d_fsdata, still under the protection of the SRCU read side critical section. This SRCU read side critical section prevents any pending debugfs_remove() and friends to return to their callers. Since a file's private data must only be freed after the return of debugfs_remove(), the ongoing proxied call is guarded against any file removal race. If, on the other hand, the initial call to debugfs_use_file_start() detects that the dentry is dead, the wrapper simply returns -EIO and does not forward the call. Note that the ->poll() wrapper is special in that its signature does not allow for the return of arbitrary -EXXX values and thus, POLLHUP is returned here. In order not to pollute debugfs with wrapper definitions that aren't ever needed, I chose not to define a wrapper for every struct file_operations method possible. Instead, a wrapper is defined only for the subset of methods which are actually set by any debugfs users. Currently, these are: ->llseek() ->read() ->write() ->unlocked_ioctl() ->poll() The ->release() wrapper is special in that it does not protect the original ->release() in any way from dead files in order not to leak resources. Thus, any ->release() handed to debugfs must implement file lifetime management manually, if needed. For only 33 out of a total of 434 releasers handed in to debugfs, it could not be verified immediately whether they access data structures that might have been freed upon a debugfs_remove() return in the meanwhile. Export debugfs_use_file_start() and debugfs_use_file_finish() in order to allow any ->release() to manually implement file lifetime management. For a set of common cases of struct file_operations implemented by the debugfs_core itself, future patches will incorporate file lifetime management directly within those in order to allow for their unproxied operation. Rename the original, non-proxying "debugfs_create_file()" to "debugfs_create_file_unsafe()" and keep it for future internal use by debugfs itself. Factor out code common to both into the new __debugfs_create_file(). Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-03-22 21:11:14 +08:00
return __debugfs_create_file(name, mode, parent, data,
fops ? &debugfs_open_proxy_file_operations :
&debugfs_noop_file_operations,
fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
/**
* debugfs_create_file_size - create a file in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @data: a pointer to something that the caller will want to get to later
* on. The inode.i_private pointer will point to this value on
* the open() call.
* @fops: a pointer to a struct file_operations that should be used for
* this file.
* @file_size: initial file size
*
* This is the basic "create a file" function for debugfs. It allows for a
* wide range of flexibility in creating a file, or a directory (if you want
* to create a directory, the debugfs_create_dir() function is
* recommended to be used instead.)
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
loff_t file_size)
{
struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
if (de)
d_inode(de)->i_size = file_size;
return de;
}
EXPORT_SYMBOL_GPL(debugfs_create_file_size);
/**
* debugfs_create_dir - create a directory in the debugfs filesystem
* @name: a pointer to a string containing the name of the directory to
* create.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* directory will be created in the root of the debugfs filesystem.
*
* This function creates a directory in debugfs with the given name.
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the file is
* to be removed (no automatic cleanup happens if your module is unloaded,
* you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
{
struct dentry *dentry = start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create directory '%s'\n",
name);
return failed_creating(dentry);
}
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
debugfs: Restrict debugfs when the kernel is locked down Disallow opening of debugfs files that might be used to muck around when the kernel is locked down as various drivers give raw access to hardware through debugfs. Given the effort of auditing all 2000 or so files and manually fixing each one as necessary, I've chosen to apply a heuristic instead. The following changes are made: (1) chmod and chown are disallowed on debugfs objects (though the root dir can be modified by mount and remount, but I'm not worried about that). (2) When the kernel is locked down, only files with the following criteria are permitted to be opened: - The file must have mode 00444 - The file must not have ioctl methods - The file must not have mmap (3) When the kernel is locked down, files may only be opened for reading. Normal device interaction should be done through configfs, sysfs or a miscdev, not debugfs. Note that this makes it unnecessary to specifically lock down show_dsts(), show_devs() and show_call() in the asus-wmi driver. I would actually prefer to lock down all files by default and have the the files unlocked by the creator. This is tricky to manage correctly, though, as there are 19 creation functions and ~1600 call sites (some of them in loops scanning tables). Signed-off-by: David Howells <dhowells@redhat.com> cc: Andy Shevchenko <andy.shevchenko@gmail.com> cc: acpi4asus-user@lists.sourceforge.net cc: platform-driver-x86@vger.kernel.org cc: Matthew Garrett <mjg59@srcf.ucam.org> cc: Thomas Gleixner <tglx@linutronix.de> Cc: Greg KH <greg@kroah.com> Cc: Rafael J. Wysocki <rafael@kernel.org> Signed-off-by: Matthew Garrett <matthewgarrett@google.com> Signed-off-by: James Morris <jmorris@namei.org>
2019-08-20 08:18:02 +08:00
inode->i_op = &debugfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
/**
* debugfs_create_automount - create automount point in the debugfs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @parent: a pointer to the parent dentry for this file. This should be a
* directory dentry if set. If this parameter is NULL, then the
* file will be created in the root of the debugfs filesystem.
* @f: function to be called when pathname resolution steps on that one.
* @data: opaque argument to pass to f().
*
* @f should return what ->d_automount() would.
*/
struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
fs: Better permission checking for submounts To support unprivileged users mounting filesystems two permission checks have to be performed: a test to see if the user allowed to create a mount in the mount namespace, and a test to see if the user is allowed to access the specified filesystem. The automount case is special in that mounting the original filesystem grants permission to mount the sub-filesystems, to any user who happens to stumble across the their mountpoint and satisfies the ordinary filesystem permission checks. Attempting to handle the automount case by using override_creds almost works. It preserves the idea that permission to mount the original filesystem is permission to mount the sub-filesystem. Unfortunately using override_creds messes up the filesystems ordinary permission checks. Solve this by being explicit that a mount is a submount by introducing vfs_submount, and using it where appropriate. vfs_submount uses a new mount internal mount flags MS_SUBMOUNT, to let sget and friends know that a mount is a submount so they can take appropriate action. sget and sget_userns are modified to not perform any permission checks on submounts. follow_automount is modified to stop using override_creds as that has proven problemantic. do_mount is modified to always remove the new MS_SUBMOUNT flag so that we know userspace will never by able to specify it. autofs4 is modified to stop using current_real_cred that was put in there to handle the previous version of submount permission checking. cifs is modified to pass the mountpoint all of the way down to vfs_submount. debugfs is modified to pass the mountpoint all of the way down to trace_automount by adding a new parameter. To make this change easier a new typedef debugfs_automount_t is introduced to capture the type of the debugfs automount function. Cc: stable@vger.kernel.org Fixes: 069d5ac9ae0d ("autofs: Fix automounts by using current_real_cred()->uid") Fixes: aeaa4a79ff6a ("fs: Call d_automount with the filesystems creds") Reviewed-by: Trond Myklebust <trond.myklebust@primarydata.com> Reviewed-by: Seth Forshee <seth.forshee@canonical.com> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
2017-02-01 01:06:16 +08:00
debugfs_automount_t f,
void *data)
{
struct dentry *dentry = start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
return dentry;
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create automount '%s'\n",
name);
return failed_creating(dentry);
}
make_empty_dir_inode(inode);
inode->i_flags |= S_AUTOMOUNT;
inode->i_private = data;
dentry->d_fsdata = (void *)f;
debugfs: fix inode i_nlink references for automount dentry Directory inodes should start off with i_nlink == 2 (one extra ref for "." entry). debugfs_create_automount() increases neither the i_nlink reference for current inode nor for parent inode. On attempt to remove the automount dentry, kernel complains: [ 86.288070] WARNING: CPU: 1 PID: 3616 at fs/inode.c:273 drop_nlink+0x3e/0x50() [ 86.288461] Modules linked in: debugfs_example2(O-) [ 86.288745] CPU: 1 PID: 3616 Comm: rmmod Tainted: G O 4.4.0-rc3-next-20151207+ #135 [ 86.289197] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.8.2-20150617_082717-anatol 04/01/2014 [ 86.289696] ffffffff81be05c9 ffff8800b9e6fda0 ffffffff81352e2c 0000000000000000 [ 86.290110] ffff8800b9e6fdd8 ffffffff81065142 ffff8801399175e8 ffff8800bb78b240 [ 86.290507] ffff8801399175e8 ffff8800b73d7898 ffff8800b73d7840 ffff8800b9e6fde8 [ 86.290933] Call Trace: [ 86.291080] [<ffffffff81352e2c>] dump_stack+0x4e/0x82 [ 86.291340] [<ffffffff81065142>] warn_slowpath_common+0x82/0xc0 [ 86.291640] [<ffffffff8106523a>] warn_slowpath_null+0x1a/0x20 [ 86.291932] [<ffffffff811ae62e>] drop_nlink+0x3e/0x50 [ 86.292208] [<ffffffff811ba35b>] simple_unlink+0x4b/0x60 [ 86.292481] [<ffffffff811ba3a7>] simple_rmdir+0x37/0x50 [ 86.292748] [<ffffffff812d9808>] __debugfs_remove.part.16+0xa8/0xd0 [ 86.293082] [<ffffffff812d9a0b>] debugfs_remove_recursive+0xdb/0x1c0 [ 86.293406] [<ffffffffa00004dd>] cleanup_module+0x2d/0x3b [debugfs_example2] [ 86.293762] [<ffffffff810d959b>] SyS_delete_module+0x16b/0x220 [ 86.294077] [<ffffffff818ef857>] entry_SYSCALL_64_fastpath+0x12/0x6a [ 86.294405] ---[ end trace c9fc53353fe14a36 ]--- [ 86.294639] ------------[ cut here ]------------ To reproduce the issue it is enough to invoke these lines: autom = debugfs_create_automount("automount", NULL, vfsmount_cb, data); BUG_ON(IS_ERR_OR_NULL(autom)); debugfs_remove(autom); The issue is fixed by increasing inode i_nlink references for current and parent inodes. Signed-off-by: Roman Pen <r.peniaev@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-02-09 18:30:29 +08:00
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
debugfs: fix inode i_nlink references for automount dentry Directory inodes should start off with i_nlink == 2 (one extra ref for "." entry). debugfs_create_automount() increases neither the i_nlink reference for current inode nor for parent inode. On attempt to remove the automount dentry, kernel complains: [ 86.288070] WARNING: CPU: 1 PID: 3616 at fs/inode.c:273 drop_nlink+0x3e/0x50() [ 86.288461] Modules linked in: debugfs_example2(O-) [ 86.288745] CPU: 1 PID: 3616 Comm: rmmod Tainted: G O 4.4.0-rc3-next-20151207+ #135 [ 86.289197] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.8.2-20150617_082717-anatol 04/01/2014 [ 86.289696] ffffffff81be05c9 ffff8800b9e6fda0 ffffffff81352e2c 0000000000000000 [ 86.290110] ffff8800b9e6fdd8 ffffffff81065142 ffff8801399175e8 ffff8800bb78b240 [ 86.290507] ffff8801399175e8 ffff8800b73d7898 ffff8800b73d7840 ffff8800b9e6fde8 [ 86.290933] Call Trace: [ 86.291080] [<ffffffff81352e2c>] dump_stack+0x4e/0x82 [ 86.291340] [<ffffffff81065142>] warn_slowpath_common+0x82/0xc0 [ 86.291640] [<ffffffff8106523a>] warn_slowpath_null+0x1a/0x20 [ 86.291932] [<ffffffff811ae62e>] drop_nlink+0x3e/0x50 [ 86.292208] [<ffffffff811ba35b>] simple_unlink+0x4b/0x60 [ 86.292481] [<ffffffff811ba3a7>] simple_rmdir+0x37/0x50 [ 86.292748] [<ffffffff812d9808>] __debugfs_remove.part.16+0xa8/0xd0 [ 86.293082] [<ffffffff812d9a0b>] debugfs_remove_recursive+0xdb/0x1c0 [ 86.293406] [<ffffffffa00004dd>] cleanup_module+0x2d/0x3b [debugfs_example2] [ 86.293762] [<ffffffff810d959b>] SyS_delete_module+0x16b/0x220 [ 86.294077] [<ffffffff818ef857>] entry_SYSCALL_64_fastpath+0x12/0x6a [ 86.294405] ---[ end trace c9fc53353fe14a36 ]--- [ 86.294639] ------------[ cut here ]------------ To reproduce the issue it is enough to invoke these lines: autom = debugfs_create_automount("automount", NULL, vfsmount_cb, data); BUG_ON(IS_ERR_OR_NULL(autom)); debugfs_remove(autom); The issue is fixed by increasing inode i_nlink references for current and parent inodes. Signed-off-by: Roman Pen <r.peniaev@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-02-09 18:30:29 +08:00
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
EXPORT_SYMBOL(debugfs_create_automount);
/**
* debugfs_create_symlink- create a symbolic link in the debugfs filesystem
* @name: a pointer to a string containing the name of the symbolic link to
* create.
* @parent: a pointer to the parent dentry for this symbolic link. This
* should be a directory dentry if set. If this parameter is NULL,
* then the symbolic link will be created in the root of the debugfs
* filesystem.
* @target: a pointer to a string containing the path to the target of the
* symbolic link.
*
* This function creates a symbolic link with the given name in debugfs that
* links to the given target path.
*
* This function will return a pointer to a dentry if it succeeds. This
* pointer must be passed to the debugfs_remove() function when the symbolic
* link is to be removed (no automatic cleanup happens if your module is
* unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR)
* will be returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
const char *target)
{
struct dentry *dentry;
struct inode *inode;
char *link = kstrdup(target, GFP_KERNEL);
if (!link)
return ERR_PTR(-ENOMEM);
dentry = start_creating(name, parent);
if (IS_ERR(dentry)) {
kfree(link);
return dentry;
}
inode = debugfs_get_inode(dentry->d_sb);
if (unlikely(!inode)) {
pr_err("out of free dentries, can not create symlink '%s'\n",
name);
kfree(link);
return failed_creating(dentry);
}
inode->i_mode = S_IFLNK | S_IRWXUGO;
debugfs: Restrict debugfs when the kernel is locked down Disallow opening of debugfs files that might be used to muck around when the kernel is locked down as various drivers give raw access to hardware through debugfs. Given the effort of auditing all 2000 or so files and manually fixing each one as necessary, I've chosen to apply a heuristic instead. The following changes are made: (1) chmod and chown are disallowed on debugfs objects (though the root dir can be modified by mount and remount, but I'm not worried about that). (2) When the kernel is locked down, only files with the following criteria are permitted to be opened: - The file must have mode 00444 - The file must not have ioctl methods - The file must not have mmap (3) When the kernel is locked down, files may only be opened for reading. Normal device interaction should be done through configfs, sysfs or a miscdev, not debugfs. Note that this makes it unnecessary to specifically lock down show_dsts(), show_devs() and show_call() in the asus-wmi driver. I would actually prefer to lock down all files by default and have the the files unlocked by the creator. This is tricky to manage correctly, though, as there are 19 creation functions and ~1600 call sites (some of them in loops scanning tables). Signed-off-by: David Howells <dhowells@redhat.com> cc: Andy Shevchenko <andy.shevchenko@gmail.com> cc: acpi4asus-user@lists.sourceforge.net cc: platform-driver-x86@vger.kernel.org cc: Matthew Garrett <mjg59@srcf.ucam.org> cc: Thomas Gleixner <tglx@linutronix.de> Cc: Greg KH <greg@kroah.com> Cc: Rafael J. Wysocki <rafael@kernel.org> Signed-off-by: Matthew Garrett <matthewgarrett@google.com> Signed-off-by: James Morris <jmorris@namei.org>
2019-08-20 08:18:02 +08:00
inode->i_op = &debugfs_symlink_inode_operations;
inode->i_link = link;
d_instantiate(dentry, inode);
return end_creating(dentry);
}
EXPORT_SYMBOL_GPL(debugfs_create_symlink);
static void __debugfs_file_removed(struct dentry *dentry)
debugfs: implement per-file removal protection Since commit 49d200deaa68 ("debugfs: prevent access to removed files' private data"), accesses to a file's private data are protected from concurrent removal by covering all file_operations with a SRCU read section and sychronizing with those before returning from debugfs_remove() by means of synchronize_srcu(). As pointed out by Johannes Berg, there are debugfs files with forever blocking file_operations. Their corresponding SRCU read side sections would block any debugfs_remove() forever as well, even unrelated ones. This results in a livelock. Because a remover can't cancel any indefinite blocking within foreign files, this is a problem. Resolve this by introducing support for more granular protection on a per-file basis. This is implemented by introducing an 'active_users' refcount_t to the per-file struct debugfs_fsdata state. At file creation time, it is set to one and a debugfs_remove() will drop that initial reference. The new debugfs_file_get() and debugfs_file_put(), intended to be used in place of former debugfs_use_file_start() and debugfs_use_file_finish(), increment and decrement it respectively. Once the count drops to zero, debugfs_file_put() will signal a completion which is possibly being waited for from debugfs_remove(). Thus, as long as there is a debugfs_file_get() not yet matched by a corresponding debugfs_file_put() around, debugfs_remove() will block. Actual users of debugfs_use_file_start() and -finish() will get converted to the new debugfs_file_get() and debugfs_file_put() by followup patches. Fixes: 49d200deaa68 ("debugfs: prevent access to removed files' private data") Reported-by: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-10-31 07:15:48 +08:00
{
struct debugfs_fsdata *fsd;
debugfs: defer debugfs_fsdata allocation to first usage Currently, __debugfs_create_file allocates one struct debugfs_fsdata instance for every file created. However, there are potentially many debugfs file around, most of which are never touched by userspace. Thus, defer the allocations to the first usage, i.e. to the first debugfs_file_get(). A dentry's ->d_fsdata starts out to point to the "real", user provided fops. After a debugfs_fsdata instance has been allocated (and the real fops pointer has been moved over into its ->real_fops member), ->d_fsdata is changed to point to it from then on. The two cases are distinguished by setting BIT(0) for the real fops case. struct debugfs_fsdata's foremost purpose is to track active users and to make debugfs_remove() block until they are done. Since no debugfs_fsdata instance means no active users, make debugfs_remove() return immediately in this case. Take care of possible races between debugfs_file_get() and debugfs_remove(): either debugfs_remove() must see a debugfs_fsdata instance and thus wait for possible active users or debugfs_file_get() must see a dead dentry and return immediately. Make a dentry's ->d_release(), i.e. debugfs_release_dentry(), check whether ->d_fsdata is actually a debugfs_fsdata instance before kfree()ing it. Similarly, make debugfs_real_fops() check whether ->d_fsdata is actually a debugfs_fsdata instance before returning it, otherwise emit a warning. The set of possible error codes returned from debugfs_file_get() has grown from -EIO to -EIO and -ENOMEM. Make open_proxy_open() and full_proxy_open() pass the -ENOMEM onwards to their callers. Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-10-31 07:15:54 +08:00
/*
* Paired with the closing smp_mb() implied by a successful
* cmpxchg() in debugfs_file_get(): either
* debugfs_file_get() must see a dead dentry or we must see a
* debugfs_fsdata instance at ->d_fsdata here (or both).
*/
smp_mb();
fsd = READ_ONCE(dentry->d_fsdata);
if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
return;
debugfs: implement per-file removal protection Since commit 49d200deaa68 ("debugfs: prevent access to removed files' private data"), accesses to a file's private data are protected from concurrent removal by covering all file_operations with a SRCU read section and sychronizing with those before returning from debugfs_remove() by means of synchronize_srcu(). As pointed out by Johannes Berg, there are debugfs files with forever blocking file_operations. Their corresponding SRCU read side sections would block any debugfs_remove() forever as well, even unrelated ones. This results in a livelock. Because a remover can't cancel any indefinite blocking within foreign files, this is a problem. Resolve this by introducing support for more granular protection on a per-file basis. This is implemented by introducing an 'active_users' refcount_t to the per-file struct debugfs_fsdata state. At file creation time, it is set to one and a debugfs_remove() will drop that initial reference. The new debugfs_file_get() and debugfs_file_put(), intended to be used in place of former debugfs_use_file_start() and debugfs_use_file_finish(), increment and decrement it respectively. Once the count drops to zero, debugfs_file_put() will signal a completion which is possibly being waited for from debugfs_remove(). Thus, as long as there is a debugfs_file_get() not yet matched by a corresponding debugfs_file_put() around, debugfs_remove() will block. Actual users of debugfs_use_file_start() and -finish() will get converted to the new debugfs_file_get() and debugfs_file_put() by followup patches. Fixes: 49d200deaa68 ("debugfs: prevent access to removed files' private data") Reported-by: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-10-31 07:15:48 +08:00
if (!refcount_dec_and_test(&fsd->active_users))
wait_for_completion(&fsd->active_users_drained);
}
static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
{
int ret = 0;
if (simple_positive(dentry)) {
dget(dentry);
if (d_is_dir(dentry)) {
ret = simple_rmdir(d_inode(parent), dentry);
if (!ret)
fsnotify_rmdir(d_inode(parent), dentry);
debugfs: implement per-file removal protection Since commit 49d200deaa68 ("debugfs: prevent access to removed files' private data"), accesses to a file's private data are protected from concurrent removal by covering all file_operations with a SRCU read section and sychronizing with those before returning from debugfs_remove() by means of synchronize_srcu(). As pointed out by Johannes Berg, there are debugfs files with forever blocking file_operations. Their corresponding SRCU read side sections would block any debugfs_remove() forever as well, even unrelated ones. This results in a livelock. Because a remover can't cancel any indefinite blocking within foreign files, this is a problem. Resolve this by introducing support for more granular protection on a per-file basis. This is implemented by introducing an 'active_users' refcount_t to the per-file struct debugfs_fsdata state. At file creation time, it is set to one and a debugfs_remove() will drop that initial reference. The new debugfs_file_get() and debugfs_file_put(), intended to be used in place of former debugfs_use_file_start() and debugfs_use_file_finish(), increment and decrement it respectively. Once the count drops to zero, debugfs_file_put() will signal a completion which is possibly being waited for from debugfs_remove(). Thus, as long as there is a debugfs_file_get() not yet matched by a corresponding debugfs_file_put() around, debugfs_remove() will block. Actual users of debugfs_use_file_start() and -finish() will get converted to the new debugfs_file_get() and debugfs_file_put() by followup patches. Fixes: 49d200deaa68 ("debugfs: prevent access to removed files' private data") Reported-by: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-10-31 07:15:48 +08:00
} else {
simple_unlink(d_inode(parent), dentry);
fsnotify_unlink(d_inode(parent), dentry);
debugfs: implement per-file removal protection Since commit 49d200deaa68 ("debugfs: prevent access to removed files' private data"), accesses to a file's private data are protected from concurrent removal by covering all file_operations with a SRCU read section and sychronizing with those before returning from debugfs_remove() by means of synchronize_srcu(). As pointed out by Johannes Berg, there are debugfs files with forever blocking file_operations. Their corresponding SRCU read side sections would block any debugfs_remove() forever as well, even unrelated ones. This results in a livelock. Because a remover can't cancel any indefinite blocking within foreign files, this is a problem. Resolve this by introducing support for more granular protection on a per-file basis. This is implemented by introducing an 'active_users' refcount_t to the per-file struct debugfs_fsdata state. At file creation time, it is set to one and a debugfs_remove() will drop that initial reference. The new debugfs_file_get() and debugfs_file_put(), intended to be used in place of former debugfs_use_file_start() and debugfs_use_file_finish(), increment and decrement it respectively. Once the count drops to zero, debugfs_file_put() will signal a completion which is possibly being waited for from debugfs_remove(). Thus, as long as there is a debugfs_file_get() not yet matched by a corresponding debugfs_file_put() around, debugfs_remove() will block. Actual users of debugfs_use_file_start() and -finish() will get converted to the new debugfs_file_get() and debugfs_file_put() by followup patches. Fixes: 49d200deaa68 ("debugfs: prevent access to removed files' private data") Reported-by: Johannes Berg <johannes@sipsolutions.net> Signed-off-by: Nicolai Stange <nicstange@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2017-10-31 07:15:48 +08:00
}
if (!ret)
d_delete(dentry);
if (d_is_reg(dentry))
__debugfs_file_removed(dentry);
dput(dentry);
}
return ret;
}
/**
* debugfs_remove - removes a file or directory from the debugfs filesystem
* @dentry: a pointer to a the dentry of the file or directory to be
* removed. If this parameter is NULL or an error value, nothing
* will be done.
*
* This function removes a file or directory in debugfs that was previously
* created with a call to another debugfs function (like
* debugfs_create_file() or variants thereof.)
*
* This function is required to be called in order for the file to be
* removed, no automatic cleanup of files will happen when a module is
* removed, you are responsible here.
*/
void debugfs_remove(struct dentry *dentry)
{
struct dentry *parent;
int ret;
if (IS_ERR_OR_NULL(dentry))
return;
parent = dentry->d_parent;
inode_lock(d_inode(parent));
ret = __debugfs_remove(dentry, parent);
inode_unlock(d_inode(parent));
if (!ret)
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
EXPORT_SYMBOL_GPL(debugfs_remove);
/**
* debugfs_remove_recursive - recursively removes a directory
* @dentry: a pointer to a the dentry of the directory to be removed. If this
* parameter is NULL or an error value, nothing will be done.
*
* This function recursively removes a directory tree in debugfs that
* was previously created with a call to another debugfs function
* (like debugfs_create_file() or variants thereof.)
*
* This function is required to be called in order for the file to be
* removed, no automatic cleanup of files will happen when a module is
* removed, you are responsible here.
*/
void debugfs_remove_recursive(struct dentry *dentry)
{
debugfs: Fix corrupted loop in debugfs_remove_recursive [ I'm currently running my tests on it now, and so far, after a few hours it has yet to blow up. I'll run it for 24 hours which it never succeeded in the past. ] The tracing code has a way to make directories within the debugfs file system as well as deleting them using mkdir/rmdir in the instance directory. This is very limited in functionality, such as there is no renames, and the parent directory "instance" can not be modified. The tracing code creates the instance directory from the debugfs code and then replaces the dentry->d_inode->i_op with its own to allow for mkdir/rmdir to work. When these are called, the d_entry and inode locks need to be released to call the instance creation and deletion code. That code has its own accounting and locking to serialize everything to prevent multiple users from causing harm. As the parent "instance" directory can not be modified this simplifies things. I created a stress test that creates several threads that randomly creates and deletes directories thousands of times a second. The code stood up to this test and I submitted it a while ago. Recently I added a new test that adds readers to the mix. While the instance directories were being added and deleted, readers would read from these directories and even enable tracing within them. This test was able to trigger a bug: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: ... CPU: 3 PID: 17789 Comm: rmdir Tainted: G W 3.15.0-rc2-test+ #41 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff88003786ca60 ti: ffff880077018000 task.ti: ffff880077018000 RIP: 0010:[<ffffffff811ed5eb>] [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP: 0018:ffff880077019df8 EFLAGS: 00010246 RAX: 0000000000000002 RBX: ffff88006f0fe490 RCX: 0000000000000000 RDX: dead000000100058 RSI: 0000000000000246 RDI: ffff88003786d454 RBP: ffff88006f0fe640 R08: 0000000000000628 R09: 0000000000000000 R10: 0000000000000628 R11: ffff8800795110a0 R12: ffff88006f0fe640 R13: ffff88006f0fe640 R14: ffffffff81817d0b R15: ffffffff818188b7 FS: 00007ff13ae24700(0000) GS:ffff88007d580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003054ec7be0 CR3: 0000000076d51000 CR4: 00000000000007e0 Stack: ffff88007a41ebe0 dead000000100058 00000000fffffffe ffff88006f0fe640 0000000000000000 ffff88006f0fe678 ffff88007a41ebe0 ffff88003793a000 00000000fffffffe ffffffff810bde82 ffff88006f0fe640 ffff88007a41eb28 Call Trace: [<ffffffff810bde82>] ? instance_rmdir+0x15b/0x1de [<ffffffff81132e2d>] ? vfs_rmdir+0x80/0xd3 [<ffffffff81132f51>] ? do_rmdir+0xd1/0x139 [<ffffffff8124ad9e>] ? trace_hardirqs_on_thunk+0x3a/0x3c [<ffffffff814fea62>] ? system_call_fastpath+0x16/0x1b Code: fe ff ff 48 8d 75 30 48 89 df e8 c9 fd ff ff 85 c0 75 13 48 c7 c6 b8 cc d2 81 48 c7 c7 b0 cc d2 81 e8 8c 7a f5 ff 48 8b 54 24 08 <48> 8b 82 a8 00 00 00 48 89 d3 48 2d a8 00 00 00 48 89 44 24 08 RIP [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP <ffff880077019df8> It took a while, but every time it triggered, it was always in the same place: list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { Where the child->d_u.d_child seemed to be corrupted. I added lots of trace_printk()s to see what was wrong, and sure enough, it was always the child's d_u.d_child field. I looked around to see what touches it and noticed that in __dentry_kill() which calls dentry_free(): static void dentry_free(struct dentry *dentry) { /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } I also noticed that __dentry_kill() unlinks the child->d_u.child under the parent->d_lock spin_lock. Looking back at the loop in debugfs_remove_recursive() it never takes the parent->d_lock to do the list walk. Adding more tracing, I was able to prove this was the issue: ftrace-t-15385 1.... 246662024us : dentry_kill <ffffffff81138b91>: free ffff88006d573600 rmdir-15409 2.... 246662024us : debugfs_remove_recursive <ffffffff811ec7e5>: child=ffff88006d573600 next=dead000000100058 The dentry_kill freed ffff88006d573600 just as the remove recursive was walking it. In order to fix this, the list walk needs to be modified a bit to take the parent->d_lock. The safe version is no longer necessary, as every time we remove a child, the parent->d_lock must be released and the list walk must start over. Each time a child is removed, even though it may still be on the list, it should be skipped by the first check in the loop: if (!debugfs_positive(child)) continue; Cc: stable@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-06-10 02:06:07 +08:00
struct dentry *child, *parent;
if (IS_ERR_OR_NULL(dentry))
return;
parent = dentry;
debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs) debugfs_remove_recursive() is wrong, 1. it wrongly assumes that !list_empty(d_subdirs) means that this dir should be removed. This is not that bad by itself, but: 2. if d_subdirs does not becomes empty after __debugfs_remove() it gives up and silently fails, it doesn't even try to remove other entries. However ->d_subdirs can be non-empty because it still has the already deleted !debugfs_positive() entries. 3. simple_release_fs() is called even if __debugfs_remove() fails. Suppose we have dir1/ dir2/ file2 file1 and someone opens dir1/dir2/file2. Now, debugfs_remove_recursive(dir1/dir2) succeeds, and dir1/dir2 goes away. But debugfs_remove_recursive(dir1) silently fails and doesn't remove this directory. Because it tries to delete (the already deleted) dir1/dir2/file2 again and then fails due to "Avoid infinite loop" logic. Test-case: #!/bin/sh cd /sys/kernel/debug/tracing echo 'p:probe/sigprocmask sigprocmask' >> kprobe_events sleep 1000 < events/probe/sigprocmask/id & echo -n >| kprobe_events [ -d events/probe ] && echo "ERR!! failed to rm probe" And after that it is not possible to create another probe entry. With this patch debugfs_remove_recursive() skips !debugfs_positive() files although this is not strictly needed. The most important change is that it does not try to make ->d_subdirs empty, it simply scans the whole list(s) recursively and removes as much as possible. Link: http://lkml.kernel.org/r/20130726151256.GC19472@redhat.com Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-26 23:12:56 +08:00
down:
inode_lock(d_inode(parent));
debugfs: Fix corrupted loop in debugfs_remove_recursive [ I'm currently running my tests on it now, and so far, after a few hours it has yet to blow up. I'll run it for 24 hours which it never succeeded in the past. ] The tracing code has a way to make directories within the debugfs file system as well as deleting them using mkdir/rmdir in the instance directory. This is very limited in functionality, such as there is no renames, and the parent directory "instance" can not be modified. The tracing code creates the instance directory from the debugfs code and then replaces the dentry->d_inode->i_op with its own to allow for mkdir/rmdir to work. When these are called, the d_entry and inode locks need to be released to call the instance creation and deletion code. That code has its own accounting and locking to serialize everything to prevent multiple users from causing harm. As the parent "instance" directory can not be modified this simplifies things. I created a stress test that creates several threads that randomly creates and deletes directories thousands of times a second. The code stood up to this test and I submitted it a while ago. Recently I added a new test that adds readers to the mix. While the instance directories were being added and deleted, readers would read from these directories and even enable tracing within them. This test was able to trigger a bug: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: ... CPU: 3 PID: 17789 Comm: rmdir Tainted: G W 3.15.0-rc2-test+ #41 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff88003786ca60 ti: ffff880077018000 task.ti: ffff880077018000 RIP: 0010:[<ffffffff811ed5eb>] [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP: 0018:ffff880077019df8 EFLAGS: 00010246 RAX: 0000000000000002 RBX: ffff88006f0fe490 RCX: 0000000000000000 RDX: dead000000100058 RSI: 0000000000000246 RDI: ffff88003786d454 RBP: ffff88006f0fe640 R08: 0000000000000628 R09: 0000000000000000 R10: 0000000000000628 R11: ffff8800795110a0 R12: ffff88006f0fe640 R13: ffff88006f0fe640 R14: ffffffff81817d0b R15: ffffffff818188b7 FS: 00007ff13ae24700(0000) GS:ffff88007d580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003054ec7be0 CR3: 0000000076d51000 CR4: 00000000000007e0 Stack: ffff88007a41ebe0 dead000000100058 00000000fffffffe ffff88006f0fe640 0000000000000000 ffff88006f0fe678 ffff88007a41ebe0 ffff88003793a000 00000000fffffffe ffffffff810bde82 ffff88006f0fe640 ffff88007a41eb28 Call Trace: [<ffffffff810bde82>] ? instance_rmdir+0x15b/0x1de [<ffffffff81132e2d>] ? vfs_rmdir+0x80/0xd3 [<ffffffff81132f51>] ? do_rmdir+0xd1/0x139 [<ffffffff8124ad9e>] ? trace_hardirqs_on_thunk+0x3a/0x3c [<ffffffff814fea62>] ? system_call_fastpath+0x16/0x1b Code: fe ff ff 48 8d 75 30 48 89 df e8 c9 fd ff ff 85 c0 75 13 48 c7 c6 b8 cc d2 81 48 c7 c7 b0 cc d2 81 e8 8c 7a f5 ff 48 8b 54 24 08 <48> 8b 82 a8 00 00 00 48 89 d3 48 2d a8 00 00 00 48 89 44 24 08 RIP [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP <ffff880077019df8> It took a while, but every time it triggered, it was always in the same place: list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { Where the child->d_u.d_child seemed to be corrupted. I added lots of trace_printk()s to see what was wrong, and sure enough, it was always the child's d_u.d_child field. I looked around to see what touches it and noticed that in __dentry_kill() which calls dentry_free(): static void dentry_free(struct dentry *dentry) { /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } I also noticed that __dentry_kill() unlinks the child->d_u.child under the parent->d_lock spin_lock. Looking back at the loop in debugfs_remove_recursive() it never takes the parent->d_lock to do the list walk. Adding more tracing, I was able to prove this was the issue: ftrace-t-15385 1.... 246662024us : dentry_kill <ffffffff81138b91>: free ffff88006d573600 rmdir-15409 2.... 246662024us : debugfs_remove_recursive <ffffffff811ec7e5>: child=ffff88006d573600 next=dead000000100058 The dentry_kill freed ffff88006d573600 just as the remove recursive was walking it. In order to fix this, the list walk needs to be modified a bit to take the parent->d_lock. The safe version is no longer necessary, as every time we remove a child, the parent->d_lock must be released and the list walk must start over. Each time a child is removed, even though it may still be on the list, it should be skipped by the first check in the loop: if (!debugfs_positive(child)) continue; Cc: stable@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-06-10 02:06:07 +08:00
loop:
/*
* The parent->d_subdirs is protected by the d_lock. Outside that
* lock, the child can be unlinked and set to be freed which can
* use the d_u.d_child as the rcu head and corrupt this list.
*/
spin_lock(&parent->d_lock);
list_for_each_entry(child, &parent->d_subdirs, d_child) {
if (!simple_positive(child))
debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs) debugfs_remove_recursive() is wrong, 1. it wrongly assumes that !list_empty(d_subdirs) means that this dir should be removed. This is not that bad by itself, but: 2. if d_subdirs does not becomes empty after __debugfs_remove() it gives up and silently fails, it doesn't even try to remove other entries. However ->d_subdirs can be non-empty because it still has the already deleted !debugfs_positive() entries. 3. simple_release_fs() is called even if __debugfs_remove() fails. Suppose we have dir1/ dir2/ file2 file1 and someone opens dir1/dir2/file2. Now, debugfs_remove_recursive(dir1/dir2) succeeds, and dir1/dir2 goes away. But debugfs_remove_recursive(dir1) silently fails and doesn't remove this directory. Because it tries to delete (the already deleted) dir1/dir2/file2 again and then fails due to "Avoid infinite loop" logic. Test-case: #!/bin/sh cd /sys/kernel/debug/tracing echo 'p:probe/sigprocmask sigprocmask' >> kprobe_events sleep 1000 < events/probe/sigprocmask/id & echo -n >| kprobe_events [ -d events/probe ] && echo "ERR!! failed to rm probe" And after that it is not possible to create another probe entry. With this patch debugfs_remove_recursive() skips !debugfs_positive() files although this is not strictly needed. The most important change is that it does not try to make ->d_subdirs empty, it simply scans the whole list(s) recursively and removes as much as possible. Link: http://lkml.kernel.org/r/20130726151256.GC19472@redhat.com Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-26 23:12:56 +08:00
continue;
debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs) debugfs_remove_recursive() is wrong, 1. it wrongly assumes that !list_empty(d_subdirs) means that this dir should be removed. This is not that bad by itself, but: 2. if d_subdirs does not becomes empty after __debugfs_remove() it gives up and silently fails, it doesn't even try to remove other entries. However ->d_subdirs can be non-empty because it still has the already deleted !debugfs_positive() entries. 3. simple_release_fs() is called even if __debugfs_remove() fails. Suppose we have dir1/ dir2/ file2 file1 and someone opens dir1/dir2/file2. Now, debugfs_remove_recursive(dir1/dir2) succeeds, and dir1/dir2 goes away. But debugfs_remove_recursive(dir1) silently fails and doesn't remove this directory. Because it tries to delete (the already deleted) dir1/dir2/file2 again and then fails due to "Avoid infinite loop" logic. Test-case: #!/bin/sh cd /sys/kernel/debug/tracing echo 'p:probe/sigprocmask sigprocmask' >> kprobe_events sleep 1000 < events/probe/sigprocmask/id & echo -n >| kprobe_events [ -d events/probe ] && echo "ERR!! failed to rm probe" And after that it is not possible to create another probe entry. With this patch debugfs_remove_recursive() skips !debugfs_positive() files although this is not strictly needed. The most important change is that it does not try to make ->d_subdirs empty, it simply scans the whole list(s) recursively and removes as much as possible. Link: http://lkml.kernel.org/r/20130726151256.GC19472@redhat.com Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-26 23:12:56 +08:00
/* perhaps simple_empty(child) makes more sense */
if (!list_empty(&child->d_subdirs)) {
debugfs: Fix corrupted loop in debugfs_remove_recursive [ I'm currently running my tests on it now, and so far, after a few hours it has yet to blow up. I'll run it for 24 hours which it never succeeded in the past. ] The tracing code has a way to make directories within the debugfs file system as well as deleting them using mkdir/rmdir in the instance directory. This is very limited in functionality, such as there is no renames, and the parent directory "instance" can not be modified. The tracing code creates the instance directory from the debugfs code and then replaces the dentry->d_inode->i_op with its own to allow for mkdir/rmdir to work. When these are called, the d_entry and inode locks need to be released to call the instance creation and deletion code. That code has its own accounting and locking to serialize everything to prevent multiple users from causing harm. As the parent "instance" directory can not be modified this simplifies things. I created a stress test that creates several threads that randomly creates and deletes directories thousands of times a second. The code stood up to this test and I submitted it a while ago. Recently I added a new test that adds readers to the mix. While the instance directories were being added and deleted, readers would read from these directories and even enable tracing within them. This test was able to trigger a bug: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: ... CPU: 3 PID: 17789 Comm: rmdir Tainted: G W 3.15.0-rc2-test+ #41 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff88003786ca60 ti: ffff880077018000 task.ti: ffff880077018000 RIP: 0010:[<ffffffff811ed5eb>] [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP: 0018:ffff880077019df8 EFLAGS: 00010246 RAX: 0000000000000002 RBX: ffff88006f0fe490 RCX: 0000000000000000 RDX: dead000000100058 RSI: 0000000000000246 RDI: ffff88003786d454 RBP: ffff88006f0fe640 R08: 0000000000000628 R09: 0000000000000000 R10: 0000000000000628 R11: ffff8800795110a0 R12: ffff88006f0fe640 R13: ffff88006f0fe640 R14: ffffffff81817d0b R15: ffffffff818188b7 FS: 00007ff13ae24700(0000) GS:ffff88007d580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003054ec7be0 CR3: 0000000076d51000 CR4: 00000000000007e0 Stack: ffff88007a41ebe0 dead000000100058 00000000fffffffe ffff88006f0fe640 0000000000000000 ffff88006f0fe678 ffff88007a41ebe0 ffff88003793a000 00000000fffffffe ffffffff810bde82 ffff88006f0fe640 ffff88007a41eb28 Call Trace: [<ffffffff810bde82>] ? instance_rmdir+0x15b/0x1de [<ffffffff81132e2d>] ? vfs_rmdir+0x80/0xd3 [<ffffffff81132f51>] ? do_rmdir+0xd1/0x139 [<ffffffff8124ad9e>] ? trace_hardirqs_on_thunk+0x3a/0x3c [<ffffffff814fea62>] ? system_call_fastpath+0x16/0x1b Code: fe ff ff 48 8d 75 30 48 89 df e8 c9 fd ff ff 85 c0 75 13 48 c7 c6 b8 cc d2 81 48 c7 c7 b0 cc d2 81 e8 8c 7a f5 ff 48 8b 54 24 08 <48> 8b 82 a8 00 00 00 48 89 d3 48 2d a8 00 00 00 48 89 44 24 08 RIP [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP <ffff880077019df8> It took a while, but every time it triggered, it was always in the same place: list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { Where the child->d_u.d_child seemed to be corrupted. I added lots of trace_printk()s to see what was wrong, and sure enough, it was always the child's d_u.d_child field. I looked around to see what touches it and noticed that in __dentry_kill() which calls dentry_free(): static void dentry_free(struct dentry *dentry) { /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } I also noticed that __dentry_kill() unlinks the child->d_u.child under the parent->d_lock spin_lock. Looking back at the loop in debugfs_remove_recursive() it never takes the parent->d_lock to do the list walk. Adding more tracing, I was able to prove this was the issue: ftrace-t-15385 1.... 246662024us : dentry_kill <ffffffff81138b91>: free ffff88006d573600 rmdir-15409 2.... 246662024us : debugfs_remove_recursive <ffffffff811ec7e5>: child=ffff88006d573600 next=dead000000100058 The dentry_kill freed ffff88006d573600 just as the remove recursive was walking it. In order to fix this, the list walk needs to be modified a bit to take the parent->d_lock. The safe version is no longer necessary, as every time we remove a child, the parent->d_lock must be released and the list walk must start over. Each time a child is removed, even though it may still be on the list, it should be skipped by the first check in the loop: if (!debugfs_positive(child)) continue; Cc: stable@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-06-10 02:06:07 +08:00
spin_unlock(&parent->d_lock);
inode_unlock(d_inode(parent));
parent = child;
debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs) debugfs_remove_recursive() is wrong, 1. it wrongly assumes that !list_empty(d_subdirs) means that this dir should be removed. This is not that bad by itself, but: 2. if d_subdirs does not becomes empty after __debugfs_remove() it gives up and silently fails, it doesn't even try to remove other entries. However ->d_subdirs can be non-empty because it still has the already deleted !debugfs_positive() entries. 3. simple_release_fs() is called even if __debugfs_remove() fails. Suppose we have dir1/ dir2/ file2 file1 and someone opens dir1/dir2/file2. Now, debugfs_remove_recursive(dir1/dir2) succeeds, and dir1/dir2 goes away. But debugfs_remove_recursive(dir1) silently fails and doesn't remove this directory. Because it tries to delete (the already deleted) dir1/dir2/file2 again and then fails due to "Avoid infinite loop" logic. Test-case: #!/bin/sh cd /sys/kernel/debug/tracing echo 'p:probe/sigprocmask sigprocmask' >> kprobe_events sleep 1000 < events/probe/sigprocmask/id & echo -n >| kprobe_events [ -d events/probe ] && echo "ERR!! failed to rm probe" And after that it is not possible to create another probe entry. With this patch debugfs_remove_recursive() skips !debugfs_positive() files although this is not strictly needed. The most important change is that it does not try to make ->d_subdirs empty, it simply scans the whole list(s) recursively and removes as much as possible. Link: http://lkml.kernel.org/r/20130726151256.GC19472@redhat.com Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-26 23:12:56 +08:00
goto down;
}
debugfs: Fix corrupted loop in debugfs_remove_recursive [ I'm currently running my tests on it now, and so far, after a few hours it has yet to blow up. I'll run it for 24 hours which it never succeeded in the past. ] The tracing code has a way to make directories within the debugfs file system as well as deleting them using mkdir/rmdir in the instance directory. This is very limited in functionality, such as there is no renames, and the parent directory "instance" can not be modified. The tracing code creates the instance directory from the debugfs code and then replaces the dentry->d_inode->i_op with its own to allow for mkdir/rmdir to work. When these are called, the d_entry and inode locks need to be released to call the instance creation and deletion code. That code has its own accounting and locking to serialize everything to prevent multiple users from causing harm. As the parent "instance" directory can not be modified this simplifies things. I created a stress test that creates several threads that randomly creates and deletes directories thousands of times a second. The code stood up to this test and I submitted it a while ago. Recently I added a new test that adds readers to the mix. While the instance directories were being added and deleted, readers would read from these directories and even enable tracing within them. This test was able to trigger a bug: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: ... CPU: 3 PID: 17789 Comm: rmdir Tainted: G W 3.15.0-rc2-test+ #41 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff88003786ca60 ti: ffff880077018000 task.ti: ffff880077018000 RIP: 0010:[<ffffffff811ed5eb>] [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP: 0018:ffff880077019df8 EFLAGS: 00010246 RAX: 0000000000000002 RBX: ffff88006f0fe490 RCX: 0000000000000000 RDX: dead000000100058 RSI: 0000000000000246 RDI: ffff88003786d454 RBP: ffff88006f0fe640 R08: 0000000000000628 R09: 0000000000000000 R10: 0000000000000628 R11: ffff8800795110a0 R12: ffff88006f0fe640 R13: ffff88006f0fe640 R14: ffffffff81817d0b R15: ffffffff818188b7 FS: 00007ff13ae24700(0000) GS:ffff88007d580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003054ec7be0 CR3: 0000000076d51000 CR4: 00000000000007e0 Stack: ffff88007a41ebe0 dead000000100058 00000000fffffffe ffff88006f0fe640 0000000000000000 ffff88006f0fe678 ffff88007a41ebe0 ffff88003793a000 00000000fffffffe ffffffff810bde82 ffff88006f0fe640 ffff88007a41eb28 Call Trace: [<ffffffff810bde82>] ? instance_rmdir+0x15b/0x1de [<ffffffff81132e2d>] ? vfs_rmdir+0x80/0xd3 [<ffffffff81132f51>] ? do_rmdir+0xd1/0x139 [<ffffffff8124ad9e>] ? trace_hardirqs_on_thunk+0x3a/0x3c [<ffffffff814fea62>] ? system_call_fastpath+0x16/0x1b Code: fe ff ff 48 8d 75 30 48 89 df e8 c9 fd ff ff 85 c0 75 13 48 c7 c6 b8 cc d2 81 48 c7 c7 b0 cc d2 81 e8 8c 7a f5 ff 48 8b 54 24 08 <48> 8b 82 a8 00 00 00 48 89 d3 48 2d a8 00 00 00 48 89 44 24 08 RIP [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP <ffff880077019df8> It took a while, but every time it triggered, it was always in the same place: list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { Where the child->d_u.d_child seemed to be corrupted. I added lots of trace_printk()s to see what was wrong, and sure enough, it was always the child's d_u.d_child field. I looked around to see what touches it and noticed that in __dentry_kill() which calls dentry_free(): static void dentry_free(struct dentry *dentry) { /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } I also noticed that __dentry_kill() unlinks the child->d_u.child under the parent->d_lock spin_lock. Looking back at the loop in debugfs_remove_recursive() it never takes the parent->d_lock to do the list walk. Adding more tracing, I was able to prove this was the issue: ftrace-t-15385 1.... 246662024us : dentry_kill <ffffffff81138b91>: free ffff88006d573600 rmdir-15409 2.... 246662024us : debugfs_remove_recursive <ffffffff811ec7e5>: child=ffff88006d573600 next=dead000000100058 The dentry_kill freed ffff88006d573600 just as the remove recursive was walking it. In order to fix this, the list walk needs to be modified a bit to take the parent->d_lock. The safe version is no longer necessary, as every time we remove a child, the parent->d_lock must be released and the list walk must start over. Each time a child is removed, even though it may still be on the list, it should be skipped by the first check in the loop: if (!debugfs_positive(child)) continue; Cc: stable@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-06-10 02:06:07 +08:00
spin_unlock(&parent->d_lock);
debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs) debugfs_remove_recursive() is wrong, 1. it wrongly assumes that !list_empty(d_subdirs) means that this dir should be removed. This is not that bad by itself, but: 2. if d_subdirs does not becomes empty after __debugfs_remove() it gives up and silently fails, it doesn't even try to remove other entries. However ->d_subdirs can be non-empty because it still has the already deleted !debugfs_positive() entries. 3. simple_release_fs() is called even if __debugfs_remove() fails. Suppose we have dir1/ dir2/ file2 file1 and someone opens dir1/dir2/file2. Now, debugfs_remove_recursive(dir1/dir2) succeeds, and dir1/dir2 goes away. But debugfs_remove_recursive(dir1) silently fails and doesn't remove this directory. Because it tries to delete (the already deleted) dir1/dir2/file2 again and then fails due to "Avoid infinite loop" logic. Test-case: #!/bin/sh cd /sys/kernel/debug/tracing echo 'p:probe/sigprocmask sigprocmask' >> kprobe_events sleep 1000 < events/probe/sigprocmask/id & echo -n >| kprobe_events [ -d events/probe ] && echo "ERR!! failed to rm probe" And after that it is not possible to create another probe entry. With this patch debugfs_remove_recursive() skips !debugfs_positive() files although this is not strictly needed. The most important change is that it does not try to make ->d_subdirs empty, it simply scans the whole list(s) recursively and removes as much as possible. Link: http://lkml.kernel.org/r/20130726151256.GC19472@redhat.com Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-26 23:12:56 +08:00
if (!__debugfs_remove(child, parent))
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
debugfs: Fix corrupted loop in debugfs_remove_recursive [ I'm currently running my tests on it now, and so far, after a few hours it has yet to blow up. I'll run it for 24 hours which it never succeeded in the past. ] The tracing code has a way to make directories within the debugfs file system as well as deleting them using mkdir/rmdir in the instance directory. This is very limited in functionality, such as there is no renames, and the parent directory "instance" can not be modified. The tracing code creates the instance directory from the debugfs code and then replaces the dentry->d_inode->i_op with its own to allow for mkdir/rmdir to work. When these are called, the d_entry and inode locks need to be released to call the instance creation and deletion code. That code has its own accounting and locking to serialize everything to prevent multiple users from causing harm. As the parent "instance" directory can not be modified this simplifies things. I created a stress test that creates several threads that randomly creates and deletes directories thousands of times a second. The code stood up to this test and I submitted it a while ago. Recently I added a new test that adds readers to the mix. While the instance directories were being added and deleted, readers would read from these directories and even enable tracing within them. This test was able to trigger a bug: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: ... CPU: 3 PID: 17789 Comm: rmdir Tainted: G W 3.15.0-rc2-test+ #41 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff88003786ca60 ti: ffff880077018000 task.ti: ffff880077018000 RIP: 0010:[<ffffffff811ed5eb>] [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP: 0018:ffff880077019df8 EFLAGS: 00010246 RAX: 0000000000000002 RBX: ffff88006f0fe490 RCX: 0000000000000000 RDX: dead000000100058 RSI: 0000000000000246 RDI: ffff88003786d454 RBP: ffff88006f0fe640 R08: 0000000000000628 R09: 0000000000000000 R10: 0000000000000628 R11: ffff8800795110a0 R12: ffff88006f0fe640 R13: ffff88006f0fe640 R14: ffffffff81817d0b R15: ffffffff818188b7 FS: 00007ff13ae24700(0000) GS:ffff88007d580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003054ec7be0 CR3: 0000000076d51000 CR4: 00000000000007e0 Stack: ffff88007a41ebe0 dead000000100058 00000000fffffffe ffff88006f0fe640 0000000000000000 ffff88006f0fe678 ffff88007a41ebe0 ffff88003793a000 00000000fffffffe ffffffff810bde82 ffff88006f0fe640 ffff88007a41eb28 Call Trace: [<ffffffff810bde82>] ? instance_rmdir+0x15b/0x1de [<ffffffff81132e2d>] ? vfs_rmdir+0x80/0xd3 [<ffffffff81132f51>] ? do_rmdir+0xd1/0x139 [<ffffffff8124ad9e>] ? trace_hardirqs_on_thunk+0x3a/0x3c [<ffffffff814fea62>] ? system_call_fastpath+0x16/0x1b Code: fe ff ff 48 8d 75 30 48 89 df e8 c9 fd ff ff 85 c0 75 13 48 c7 c6 b8 cc d2 81 48 c7 c7 b0 cc d2 81 e8 8c 7a f5 ff 48 8b 54 24 08 <48> 8b 82 a8 00 00 00 48 89 d3 48 2d a8 00 00 00 48 89 44 24 08 RIP [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP <ffff880077019df8> It took a while, but every time it triggered, it was always in the same place: list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { Where the child->d_u.d_child seemed to be corrupted. I added lots of trace_printk()s to see what was wrong, and sure enough, it was always the child's d_u.d_child field. I looked around to see what touches it and noticed that in __dentry_kill() which calls dentry_free(): static void dentry_free(struct dentry *dentry) { /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } I also noticed that __dentry_kill() unlinks the child->d_u.child under the parent->d_lock spin_lock. Looking back at the loop in debugfs_remove_recursive() it never takes the parent->d_lock to do the list walk. Adding more tracing, I was able to prove this was the issue: ftrace-t-15385 1.... 246662024us : dentry_kill <ffffffff81138b91>: free ffff88006d573600 rmdir-15409 2.... 246662024us : debugfs_remove_recursive <ffffffff811ec7e5>: child=ffff88006d573600 next=dead000000100058 The dentry_kill freed ffff88006d573600 just as the remove recursive was walking it. In order to fix this, the list walk needs to be modified a bit to take the parent->d_lock. The safe version is no longer necessary, as every time we remove a child, the parent->d_lock must be released and the list walk must start over. Each time a child is removed, even though it may still be on the list, it should be skipped by the first check in the loop: if (!debugfs_positive(child)) continue; Cc: stable@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-06-10 02:06:07 +08:00
/*
* The parent->d_lock protects agaist child from unlinking
* from d_subdirs. When releasing the parent->d_lock we can
* no longer trust that the next pointer is valid.
* Restart the loop. We'll skip this one with the
* simple_positive() check.
debugfs: Fix corrupted loop in debugfs_remove_recursive [ I'm currently running my tests on it now, and so far, after a few hours it has yet to blow up. I'll run it for 24 hours which it never succeeded in the past. ] The tracing code has a way to make directories within the debugfs file system as well as deleting them using mkdir/rmdir in the instance directory. This is very limited in functionality, such as there is no renames, and the parent directory "instance" can not be modified. The tracing code creates the instance directory from the debugfs code and then replaces the dentry->d_inode->i_op with its own to allow for mkdir/rmdir to work. When these are called, the d_entry and inode locks need to be released to call the instance creation and deletion code. That code has its own accounting and locking to serialize everything to prevent multiple users from causing harm. As the parent "instance" directory can not be modified this simplifies things. I created a stress test that creates several threads that randomly creates and deletes directories thousands of times a second. The code stood up to this test and I submitted it a while ago. Recently I added a new test that adds readers to the mix. While the instance directories were being added and deleted, readers would read from these directories and even enable tracing within them. This test was able to trigger a bug: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: ... CPU: 3 PID: 17789 Comm: rmdir Tainted: G W 3.15.0-rc2-test+ #41 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff88003786ca60 ti: ffff880077018000 task.ti: ffff880077018000 RIP: 0010:[<ffffffff811ed5eb>] [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP: 0018:ffff880077019df8 EFLAGS: 00010246 RAX: 0000000000000002 RBX: ffff88006f0fe490 RCX: 0000000000000000 RDX: dead000000100058 RSI: 0000000000000246 RDI: ffff88003786d454 RBP: ffff88006f0fe640 R08: 0000000000000628 R09: 0000000000000000 R10: 0000000000000628 R11: ffff8800795110a0 R12: ffff88006f0fe640 R13: ffff88006f0fe640 R14: ffffffff81817d0b R15: ffffffff818188b7 FS: 00007ff13ae24700(0000) GS:ffff88007d580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003054ec7be0 CR3: 0000000076d51000 CR4: 00000000000007e0 Stack: ffff88007a41ebe0 dead000000100058 00000000fffffffe ffff88006f0fe640 0000000000000000 ffff88006f0fe678 ffff88007a41ebe0 ffff88003793a000 00000000fffffffe ffffffff810bde82 ffff88006f0fe640 ffff88007a41eb28 Call Trace: [<ffffffff810bde82>] ? instance_rmdir+0x15b/0x1de [<ffffffff81132e2d>] ? vfs_rmdir+0x80/0xd3 [<ffffffff81132f51>] ? do_rmdir+0xd1/0x139 [<ffffffff8124ad9e>] ? trace_hardirqs_on_thunk+0x3a/0x3c [<ffffffff814fea62>] ? system_call_fastpath+0x16/0x1b Code: fe ff ff 48 8d 75 30 48 89 df e8 c9 fd ff ff 85 c0 75 13 48 c7 c6 b8 cc d2 81 48 c7 c7 b0 cc d2 81 e8 8c 7a f5 ff 48 8b 54 24 08 <48> 8b 82 a8 00 00 00 48 89 d3 48 2d a8 00 00 00 48 89 44 24 08 RIP [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP <ffff880077019df8> It took a while, but every time it triggered, it was always in the same place: list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { Where the child->d_u.d_child seemed to be corrupted. I added lots of trace_printk()s to see what was wrong, and sure enough, it was always the child's d_u.d_child field. I looked around to see what touches it and noticed that in __dentry_kill() which calls dentry_free(): static void dentry_free(struct dentry *dentry) { /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } I also noticed that __dentry_kill() unlinks the child->d_u.child under the parent->d_lock spin_lock. Looking back at the loop in debugfs_remove_recursive() it never takes the parent->d_lock to do the list walk. Adding more tracing, I was able to prove this was the issue: ftrace-t-15385 1.... 246662024us : dentry_kill <ffffffff81138b91>: free ffff88006d573600 rmdir-15409 2.... 246662024us : debugfs_remove_recursive <ffffffff811ec7e5>: child=ffff88006d573600 next=dead000000100058 The dentry_kill freed ffff88006d573600 just as the remove recursive was walking it. In order to fix this, the list walk needs to be modified a bit to take the parent->d_lock. The safe version is no longer necessary, as every time we remove a child, the parent->d_lock must be released and the list walk must start over. Each time a child is removed, even though it may still be on the list, it should be skipped by the first check in the loop: if (!debugfs_positive(child)) continue; Cc: stable@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-06-10 02:06:07 +08:00
*/
goto loop;
}
debugfs: Fix corrupted loop in debugfs_remove_recursive [ I'm currently running my tests on it now, and so far, after a few hours it has yet to blow up. I'll run it for 24 hours which it never succeeded in the past. ] The tracing code has a way to make directories within the debugfs file system as well as deleting them using mkdir/rmdir in the instance directory. This is very limited in functionality, such as there is no renames, and the parent directory "instance" can not be modified. The tracing code creates the instance directory from the debugfs code and then replaces the dentry->d_inode->i_op with its own to allow for mkdir/rmdir to work. When these are called, the d_entry and inode locks need to be released to call the instance creation and deletion code. That code has its own accounting and locking to serialize everything to prevent multiple users from causing harm. As the parent "instance" directory can not be modified this simplifies things. I created a stress test that creates several threads that randomly creates and deletes directories thousands of times a second. The code stood up to this test and I submitted it a while ago. Recently I added a new test that adds readers to the mix. While the instance directories were being added and deleted, readers would read from these directories and even enable tracing within them. This test was able to trigger a bug: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: ... CPU: 3 PID: 17789 Comm: rmdir Tainted: G W 3.15.0-rc2-test+ #41 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff88003786ca60 ti: ffff880077018000 task.ti: ffff880077018000 RIP: 0010:[<ffffffff811ed5eb>] [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP: 0018:ffff880077019df8 EFLAGS: 00010246 RAX: 0000000000000002 RBX: ffff88006f0fe490 RCX: 0000000000000000 RDX: dead000000100058 RSI: 0000000000000246 RDI: ffff88003786d454 RBP: ffff88006f0fe640 R08: 0000000000000628 R09: 0000000000000000 R10: 0000000000000628 R11: ffff8800795110a0 R12: ffff88006f0fe640 R13: ffff88006f0fe640 R14: ffffffff81817d0b R15: ffffffff818188b7 FS: 00007ff13ae24700(0000) GS:ffff88007d580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003054ec7be0 CR3: 0000000076d51000 CR4: 00000000000007e0 Stack: ffff88007a41ebe0 dead000000100058 00000000fffffffe ffff88006f0fe640 0000000000000000 ffff88006f0fe678 ffff88007a41ebe0 ffff88003793a000 00000000fffffffe ffffffff810bde82 ffff88006f0fe640 ffff88007a41eb28 Call Trace: [<ffffffff810bde82>] ? instance_rmdir+0x15b/0x1de [<ffffffff81132e2d>] ? vfs_rmdir+0x80/0xd3 [<ffffffff81132f51>] ? do_rmdir+0xd1/0x139 [<ffffffff8124ad9e>] ? trace_hardirqs_on_thunk+0x3a/0x3c [<ffffffff814fea62>] ? system_call_fastpath+0x16/0x1b Code: fe ff ff 48 8d 75 30 48 89 df e8 c9 fd ff ff 85 c0 75 13 48 c7 c6 b8 cc d2 81 48 c7 c7 b0 cc d2 81 e8 8c 7a f5 ff 48 8b 54 24 08 <48> 8b 82 a8 00 00 00 48 89 d3 48 2d a8 00 00 00 48 89 44 24 08 RIP [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP <ffff880077019df8> It took a while, but every time it triggered, it was always in the same place: list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { Where the child->d_u.d_child seemed to be corrupted. I added lots of trace_printk()s to see what was wrong, and sure enough, it was always the child's d_u.d_child field. I looked around to see what touches it and noticed that in __dentry_kill() which calls dentry_free(): static void dentry_free(struct dentry *dentry) { /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } I also noticed that __dentry_kill() unlinks the child->d_u.child under the parent->d_lock spin_lock. Looking back at the loop in debugfs_remove_recursive() it never takes the parent->d_lock to do the list walk. Adding more tracing, I was able to prove this was the issue: ftrace-t-15385 1.... 246662024us : dentry_kill <ffffffff81138b91>: free ffff88006d573600 rmdir-15409 2.... 246662024us : debugfs_remove_recursive <ffffffff811ec7e5>: child=ffff88006d573600 next=dead000000100058 The dentry_kill freed ffff88006d573600 just as the remove recursive was walking it. In order to fix this, the list walk needs to be modified a bit to take the parent->d_lock. The safe version is no longer necessary, as every time we remove a child, the parent->d_lock must be released and the list walk must start over. Each time a child is removed, even though it may still be on the list, it should be skipped by the first check in the loop: if (!debugfs_positive(child)) continue; Cc: stable@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-06-10 02:06:07 +08:00
spin_unlock(&parent->d_lock);
inode_unlock(d_inode(parent));
debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs) debugfs_remove_recursive() is wrong, 1. it wrongly assumes that !list_empty(d_subdirs) means that this dir should be removed. This is not that bad by itself, but: 2. if d_subdirs does not becomes empty after __debugfs_remove() it gives up and silently fails, it doesn't even try to remove other entries. However ->d_subdirs can be non-empty because it still has the already deleted !debugfs_positive() entries. 3. simple_release_fs() is called even if __debugfs_remove() fails. Suppose we have dir1/ dir2/ file2 file1 and someone opens dir1/dir2/file2. Now, debugfs_remove_recursive(dir1/dir2) succeeds, and dir1/dir2 goes away. But debugfs_remove_recursive(dir1) silently fails and doesn't remove this directory. Because it tries to delete (the already deleted) dir1/dir2/file2 again and then fails due to "Avoid infinite loop" logic. Test-case: #!/bin/sh cd /sys/kernel/debug/tracing echo 'p:probe/sigprocmask sigprocmask' >> kprobe_events sleep 1000 < events/probe/sigprocmask/id & echo -n >| kprobe_events [ -d events/probe ] && echo "ERR!! failed to rm probe" And after that it is not possible to create another probe entry. With this patch debugfs_remove_recursive() skips !debugfs_positive() files although this is not strictly needed. The most important change is that it does not try to make ->d_subdirs empty, it simply scans the whole list(s) recursively and removes as much as possible. Link: http://lkml.kernel.org/r/20130726151256.GC19472@redhat.com Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-26 23:12:56 +08:00
child = parent;
parent = parent->d_parent;
inode_lock(d_inode(parent));
debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs) debugfs_remove_recursive() is wrong, 1. it wrongly assumes that !list_empty(d_subdirs) means that this dir should be removed. This is not that bad by itself, but: 2. if d_subdirs does not becomes empty after __debugfs_remove() it gives up and silently fails, it doesn't even try to remove other entries. However ->d_subdirs can be non-empty because it still has the already deleted !debugfs_positive() entries. 3. simple_release_fs() is called even if __debugfs_remove() fails. Suppose we have dir1/ dir2/ file2 file1 and someone opens dir1/dir2/file2. Now, debugfs_remove_recursive(dir1/dir2) succeeds, and dir1/dir2 goes away. But debugfs_remove_recursive(dir1) silently fails and doesn't remove this directory. Because it tries to delete (the already deleted) dir1/dir2/file2 again and then fails due to "Avoid infinite loop" logic. Test-case: #!/bin/sh cd /sys/kernel/debug/tracing echo 'p:probe/sigprocmask sigprocmask' >> kprobe_events sleep 1000 < events/probe/sigprocmask/id & echo -n >| kprobe_events [ -d events/probe ] && echo "ERR!! failed to rm probe" And after that it is not possible to create another probe entry. With this patch debugfs_remove_recursive() skips !debugfs_positive() files although this is not strictly needed. The most important change is that it does not try to make ->d_subdirs empty, it simply scans the whole list(s) recursively and removes as much as possible. Link: http://lkml.kernel.org/r/20130726151256.GC19472@redhat.com Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-26 23:12:56 +08:00
debugfs: Fix corrupted loop in debugfs_remove_recursive [ I'm currently running my tests on it now, and so far, after a few hours it has yet to blow up. I'll run it for 24 hours which it never succeeded in the past. ] The tracing code has a way to make directories within the debugfs file system as well as deleting them using mkdir/rmdir in the instance directory. This is very limited in functionality, such as there is no renames, and the parent directory "instance" can not be modified. The tracing code creates the instance directory from the debugfs code and then replaces the dentry->d_inode->i_op with its own to allow for mkdir/rmdir to work. When these are called, the d_entry and inode locks need to be released to call the instance creation and deletion code. That code has its own accounting and locking to serialize everything to prevent multiple users from causing harm. As the parent "instance" directory can not be modified this simplifies things. I created a stress test that creates several threads that randomly creates and deletes directories thousands of times a second. The code stood up to this test and I submitted it a while ago. Recently I added a new test that adds readers to the mix. While the instance directories were being added and deleted, readers would read from these directories and even enable tracing within them. This test was able to trigger a bug: general protection fault: 0000 [#1] PREEMPT SMP Modules linked in: ... CPU: 3 PID: 17789 Comm: rmdir Tainted: G W 3.15.0-rc2-test+ #41 Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007 task: ffff88003786ca60 ti: ffff880077018000 task.ti: ffff880077018000 RIP: 0010:[<ffffffff811ed5eb>] [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP: 0018:ffff880077019df8 EFLAGS: 00010246 RAX: 0000000000000002 RBX: ffff88006f0fe490 RCX: 0000000000000000 RDX: dead000000100058 RSI: 0000000000000246 RDI: ffff88003786d454 RBP: ffff88006f0fe640 R08: 0000000000000628 R09: 0000000000000000 R10: 0000000000000628 R11: ffff8800795110a0 R12: ffff88006f0fe640 R13: ffff88006f0fe640 R14: ffffffff81817d0b R15: ffffffff818188b7 FS: 00007ff13ae24700(0000) GS:ffff88007d580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 0000003054ec7be0 CR3: 0000000076d51000 CR4: 00000000000007e0 Stack: ffff88007a41ebe0 dead000000100058 00000000fffffffe ffff88006f0fe640 0000000000000000 ffff88006f0fe678 ffff88007a41ebe0 ffff88003793a000 00000000fffffffe ffffffff810bde82 ffff88006f0fe640 ffff88007a41eb28 Call Trace: [<ffffffff810bde82>] ? instance_rmdir+0x15b/0x1de [<ffffffff81132e2d>] ? vfs_rmdir+0x80/0xd3 [<ffffffff81132f51>] ? do_rmdir+0xd1/0x139 [<ffffffff8124ad9e>] ? trace_hardirqs_on_thunk+0x3a/0x3c [<ffffffff814fea62>] ? system_call_fastpath+0x16/0x1b Code: fe ff ff 48 8d 75 30 48 89 df e8 c9 fd ff ff 85 c0 75 13 48 c7 c6 b8 cc d2 81 48 c7 c7 b0 cc d2 81 e8 8c 7a f5 ff 48 8b 54 24 08 <48> 8b 82 a8 00 00 00 48 89 d3 48 2d a8 00 00 00 48 89 44 24 08 RIP [<ffffffff811ed5eb>] debugfs_remove_recursive+0x1bd/0x367 RSP <ffff880077019df8> It took a while, but every time it triggered, it was always in the same place: list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { Where the child->d_u.d_child seemed to be corrupted. I added lots of trace_printk()s to see what was wrong, and sure enough, it was always the child's d_u.d_child field. I looked around to see what touches it and noticed that in __dentry_kill() which calls dentry_free(): static void dentry_free(struct dentry *dentry) { /* if dentry was never visible to RCU, immediate free is OK */ if (!(dentry->d_flags & DCACHE_RCUACCESS)) __d_free(&dentry->d_u.d_rcu); else call_rcu(&dentry->d_u.d_rcu, __d_free); } I also noticed that __dentry_kill() unlinks the child->d_u.child under the parent->d_lock spin_lock. Looking back at the loop in debugfs_remove_recursive() it never takes the parent->d_lock to do the list walk. Adding more tracing, I was able to prove this was the issue: ftrace-t-15385 1.... 246662024us : dentry_kill <ffffffff81138b91>: free ffff88006d573600 rmdir-15409 2.... 246662024us : debugfs_remove_recursive <ffffffff811ec7e5>: child=ffff88006d573600 next=dead000000100058 The dentry_kill freed ffff88006d573600 just as the remove recursive was walking it. In order to fix this, the list walk needs to be modified a bit to take the parent->d_lock. The safe version is no longer necessary, as every time we remove a child, the parent->d_lock must be released and the list walk must start over. Each time a child is removed, even though it may still be on the list, it should be skipped by the first check in the loop: if (!debugfs_positive(child)) continue; Cc: stable@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2014-06-10 02:06:07 +08:00
if (child != dentry)
/* go up */
goto loop;
debugfs: debugfs_remove_recursive() must not rely on list_empty(d_subdirs) debugfs_remove_recursive() is wrong, 1. it wrongly assumes that !list_empty(d_subdirs) means that this dir should be removed. This is not that bad by itself, but: 2. if d_subdirs does not becomes empty after __debugfs_remove() it gives up and silently fails, it doesn't even try to remove other entries. However ->d_subdirs can be non-empty because it still has the already deleted !debugfs_positive() entries. 3. simple_release_fs() is called even if __debugfs_remove() fails. Suppose we have dir1/ dir2/ file2 file1 and someone opens dir1/dir2/file2. Now, debugfs_remove_recursive(dir1/dir2) succeeds, and dir1/dir2 goes away. But debugfs_remove_recursive(dir1) silently fails and doesn't remove this directory. Because it tries to delete (the already deleted) dir1/dir2/file2 again and then fails due to "Avoid infinite loop" logic. Test-case: #!/bin/sh cd /sys/kernel/debug/tracing echo 'p:probe/sigprocmask sigprocmask' >> kprobe_events sleep 1000 < events/probe/sigprocmask/id & echo -n >| kprobe_events [ -d events/probe ] && echo "ERR!! failed to rm probe" And after that it is not possible to create another probe entry. With this patch debugfs_remove_recursive() skips !debugfs_positive() files although this is not strictly needed. The most important change is that it does not try to make ->d_subdirs empty, it simply scans the whole list(s) recursively and removes as much as possible. Link: http://lkml.kernel.org/r/20130726151256.GC19472@redhat.com Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2013-07-26 23:12:56 +08:00
if (!__debugfs_remove(child, parent))
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
inode_unlock(d_inode(parent));
}
EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
/**
* debugfs_rename - rename a file/directory in the debugfs filesystem
* @old_dir: a pointer to the parent dentry for the renamed object. This
* should be a directory dentry.
* @old_dentry: dentry of an object to be renamed.
* @new_dir: a pointer to the parent dentry where the object should be
* moved. This should be a directory dentry.
* @new_name: a pointer to a string containing the target name.
*
* This function renames a file/directory in debugfs. The target must not
* exist for rename to succeed.
*
* This function will return a pointer to old_dentry (which is updated to
* reflect renaming) if it succeeds. If an error occurs, %NULL will be
* returned.
*
* If debugfs is not enabled in the kernel, the value -%ENODEV will be
* returned.
*/
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name)
{
int error;
struct dentry *dentry = NULL, *trap;
struct name_snapshot old_name;
if (IS_ERR(old_dir))
return old_dir;
if (IS_ERR(new_dir))
return new_dir;
if (IS_ERR_OR_NULL(old_dentry))
return old_dentry;
trap = lock_rename(new_dir, old_dir);
/* Source or destination directories don't exist? */
if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
goto exit;
/* Source does not exist, cyclic rename, or mountpoint? */
if (d_really_is_negative(old_dentry) || old_dentry == trap ||
d_mountpoint(old_dentry))
goto exit;
dentry = lookup_one_len(new_name, new_dir, strlen(new_name));
/* Lookup failed, cyclic rename or target exists? */
if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
goto exit;
take_dentry_name_snapshot(&old_name, old_dentry);
error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
dentry, 0);
if (error) {
release_dentry_name_snapshot(&old_name);
goto exit;
}
d_move(old_dentry, dentry);
fsnotify_move(d_inode(old_dir), d_inode(new_dir), &old_name.name,
VFS: (Scripted) Convert S_ISLNK/DIR/REG(dentry->d_inode) to d_is_*(dentry) Convert the following where appropriate: (1) S_ISLNK(dentry->d_inode) to d_is_symlink(dentry). (2) S_ISREG(dentry->d_inode) to d_is_reg(dentry). (3) S_ISDIR(dentry->d_inode) to d_is_dir(dentry). This is actually more complicated than it appears as some calls should be converted to d_can_lookup() instead. The difference is whether the directory in question is a real dir with a ->lookup op or whether it's a fake dir with a ->d_automount op. In some circumstances, we can subsume checks for dentry->d_inode not being NULL into this, provided we the code isn't in a filesystem that expects d_inode to be NULL if the dirent really *is* negative (ie. if we're going to use d_inode() rather than d_backing_inode() to get the inode pointer). Note that the dentry type field may be set to something other than DCACHE_MISS_TYPE when d_inode is NULL in the case of unionmount, where the VFS manages the fall-through from a negative dentry to a lower layer. In such a case, the dentry type of the negative union dentry is set to the same as the type of the lower dentry. However, if you know d_inode is not NULL at the call site, then you can use the d_is_xxx() functions even in a filesystem. There is one further complication: a 0,0 chardev dentry may be labelled DCACHE_WHITEOUT_TYPE rather than DCACHE_SPECIAL_TYPE. Strictly, this was intended for special directory entry types that don't have attached inodes. The following perl+coccinelle script was used: use strict; my @callers; open($fd, 'git grep -l \'S_IS[A-Z].*->d_inode\' |') || die "Can't grep for S_ISDIR and co. callers"; @callers = <$fd>; close($fd); unless (@callers) { print "No matches\n"; exit(0); } my @cocci = ( '@@', 'expression E;', '@@', '', '- S_ISLNK(E->d_inode->i_mode)', '+ d_is_symlink(E)', '', '@@', 'expression E;', '@@', '', '- S_ISDIR(E->d_inode->i_mode)', '+ d_is_dir(E)', '', '@@', 'expression E;', '@@', '', '- S_ISREG(E->d_inode->i_mode)', '+ d_is_reg(E)' ); my $coccifile = "tmp.sp.cocci"; open($fd, ">$coccifile") || die $coccifile; print($fd "$_\n") || die $coccifile foreach (@cocci); close($fd); foreach my $file (@callers) { chomp $file; print "Processing ", $file, "\n"; system("spatch", "--sp-file", $coccifile, $file, "--in-place", "--no-show-diff") == 0 || die "spatch failed"; } [AV: overlayfs parts skipped] Signed-off-by: David Howells <dhowells@redhat.com> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
2015-01-29 20:02:35 +08:00
d_is_dir(old_dentry),
NULL, old_dentry);
release_dentry_name_snapshot(&old_name);
unlock_rename(new_dir, old_dir);
dput(dentry);
return old_dentry;
exit:
if (dentry && !IS_ERR(dentry))
dput(dentry);
unlock_rename(new_dir, old_dir);
if (IS_ERR(dentry))
return dentry;
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(debugfs_rename);
/**
* debugfs_initialized - Tells whether debugfs has been registered
*/
bool debugfs_initialized(void)
{
return debugfs_registered;
}
EXPORT_SYMBOL_GPL(debugfs_initialized);
static int __init debugfs_init(void)
{
int retval;
retval = sysfs_create_mount_point(kernel_kobj, "debug");
if (retval)
return retval;
retval = register_filesystem(&debug_fs_type);
if (retval)
sysfs_remove_mount_point(kernel_kobj, "debug");
else
debugfs_registered = true;
return retval;
}
core_initcall(debugfs_init);