/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/inotify.h>
#include <linux/syscalls.h>
#include <linux/magic.h>

#include <asm/ioctls.h>

static struct kmem_cache *watch_cachep __read_mostly;
static struct kmem_cache *event_cachep __read_mostly;

static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_user_watches __read_mostly;
static int inotify_max_queued_events __read_mostly;
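
/*
 * Illustrative sketch: the three tunables above surface through the sysctl
 * table below, so (assuming procfs is mounted at /proc) they can be read
 * and tuned from a shell:
 *
 *	cat /proc/sys/fs/inotify/max_user_watches
 *	echo 16384 > /proc/sys/fs/inotify/max_user_watches
 */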

/*
 * Lock ordering:
 *
 * inotify_dev->up_mutex (ensures we don't re-add the same watch)
 *	inode->inotify_mutex (protects inode's watch list)
 *		inotify_handle->mutex (protects inotify_handle's watch list)
 *			inotify_dev->ev_mutex (protects device's event queue)
 */

/*
 * Lifetimes of the main data structures:
 *
 * inotify_device: Lifetime is managed by reference count, from
 * sys_inotify_init() until release.  Additional references can bump the count
 * via get_inotify_dev() and drop the count via put_inotify_dev().
 *
 * inotify_user_watch: Lifetime is from create_watch() to the receipt of an
 * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the
 * first event, or to inotify_destroy().
 */

/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by its two mutexes, 'ev_mutex' and 'up_mutex'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct mutex		ev_mutex;	/* protects event queue */
	struct mutex		up_mutex;	/* synchronizes watch updates */
	struct list_head	events;		/* list of queued events */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	struct inotify_handle	*ih;		/* inotify handle */
	struct fasync_struct	*fa;		/* async notification */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
};

/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->ev_mutex of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};

/*
 * struct inotify_user_watch - our version of an inotify_watch, we add
 * a reference to the associated inotify_device.
 */
struct inotify_user_watch {
	struct inotify_device	*dev;	/* associated device */
	struct inotify_watch	wdata;	/* inotify watch data */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};

#endif /* CONFIG_SYSCTL */

static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		kfree(dev);
	}
}

/*
 * free_inotify_user_watch - cleans up the watch and its references
 */
static void free_inotify_user_watch(struct inotify_watch *w)
{
	struct inotify_user_watch *watch;
	struct inotify_device *dev;

	watch = container_of(w, struct inotify_user_watch, wdata);
	dev = watch->dev;

	atomic_dec(&dev->user->inotify_watches);
	put_inotify_dev(dev);
	kmem_cache_free(watch_cachep, watch);
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	/*
	 * Must be GFP_NOFS, not GFP_KERNEL: reclaim from a GFP_KERNEL
	 * allocation can recurse into the VFS (the inode-cache shrinker
	 * takes iprune_mutex), closing a four-lock deadlock cycle with
	 * the inotify mutexes held on the paths that queue events.
	 */
	kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
		 */
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}
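
		/*
		 * Worked example (illustrative, assuming the common 16-byte
		 * struct inotify_event): name "foo" gives len = 4 and
		 * rem = 12, so event.len below becomes 16; a 20-byte name
		 * gives rem = 16 - (20 % 16) = 12, so event.len becomes 32.
		 */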

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}

/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_get_last_event - return the last event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_last_event(struct inotify_device *dev)
{
	if (list_empty(&dev->events))
		return NULL;
	return list_entry(dev->events.prev, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - event handler registered with core inotify, adds
 * a new event to the given device
 *
 * Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
				    u32 cookie, const char *name,
				    struct inode *ignored)
{
	struct inotify_user_watch *watch;
	struct inotify_device *dev;
	struct inotify_kernel_event *kevent, *last;

	watch = container_of(w, struct inotify_user_watch, wdata);
	dev = watch->dev;

	mutex_lock(&dev->ev_mutex);

	/* we can safely put the watch as we don't reference it while
	 * generating the event
	 */
	if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
		put_inotify_watch(w); /* final put */

	/* coalescing: drop this event if it is a dupe of the previous */
	last = inotify_dev_get_last_event(dev);
	if (last && last->event.mask == mask && last->event.wd == wd &&
			last->event.cookie == cookie) {
		const char *lastname = last->name;

		if (!name && !lastname)
			goto out;
		if (name && lastname && !strcmp(lastname, name))
			goto out;
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		goto out;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(wd, mask, cookie, name);

	if (unlikely(!kevent))
		goto out;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
	kill_fasync(&dev->fa, SIGIO, POLL_IN);

out:
	mutex_unlock(&dev->ev_mutex);
}

/*
 * remove_kevent - cleans up the given kevent
 *
 * Caller must hold dev->ev_mutex.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
}

/*
 * free_kevent - frees the given kevent.
 */
static void free_kevent(struct inotify_kernel_event *kevent)
{
	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->ev_mutex.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
		free_kevent(kevent);
	}
}

/*
 * find_inode - resolve a user-given path to a specific inode
 */
static int find_inode(const char __user *dirname, struct path *path,
		      unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->up_mutex.
 */
static int create_watch(struct inotify_device *dev, struct inode *inode,
			u32 mask)
{
	struct inotify_user_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return -ENOSPC;

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return -ENOMEM;

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	atomic_inc(&dev->user->inotify_watches);

	inotify_init_watch(&watch->wdata);
	ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
	if (ret < 0)
		free_inotify_user_watch(&watch->wdata);

	return ret;
}

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	mutex_lock(&dev->ev_mutex);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&dev->ev_mutex);

	return ret;
}
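
/*
 * Illustrative user-space sketch (inotify_fd is a hypothetical descriptor
 * obtained from sys_inotify_init()): poll() blocks until at least one
 * event is queued, at which point the fd reports readable:
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// woken by wake_up_interruptible() above
 */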

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof (struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&dev->ev_mutex);
		if (!list_empty(&dev->events)) {
			ret = 0;
			break;
		}
		mutex_unlock(&dev->ev_mutex);

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count) {
			if (ret == 0 && count > 0) {
				/*
				 * could not get a single event because we
				 * didn't have enough buffer space.
				 */
				ret = -EINVAL;
			}
			break;
		}
		remove_kevent(dev, kevent);

		/*
		 * Must perform the copy_to_user outside the mutex in order
		 * to avoid a lock order reversal with mmap_sem.
		 */
		mutex_unlock(&dev->ev_mutex);

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			/* free the event and retake ev_mutex so the final
			 * unlock below stays balanced */
			free_kevent(kevent);
			mutex_lock(&dev->ev_mutex);
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name,
					 kevent->event.len)) {
				ret = -EFAULT;
				free_kevent(kevent);
				mutex_lock(&dev->ev_mutex);
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		free_kevent(kevent);

		mutex_lock(&dev->ev_mutex);
	}
	mutex_unlock(&dev->ev_mutex);

	return ret;
}

static int inotify_fasync(int fd, struct file *file, int on)
{
	struct inotify_device *dev = file->private_data;

	return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO;
}
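
/*
 * Illustrative user-space sketch: asynchronous notification is armed with
 * the usual fcntl() sequence (inotify_fd is a hypothetical descriptor):
 *
 *	fcntl(inotify_fd, F_SETOWN, getpid());
 *	fcntl(inotify_fd, F_SETFL, fcntl(inotify_fd, F_GETFL) | O_ASYNC);
 *
 * after which the kill_fasync() in inotify_dev_queue_event() raises SIGIO
 * each time an event is queued.
 */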

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	inotify_destroy(dev->ih);

	/* destroy all of the events on this device */
	mutex_lock(&dev->ev_mutex);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	mutex_unlock(&dev->ev_mutex);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}
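
/*
 * Illustrative user-space sketch: FIONREAD reports dev->queue_size, so a
 * reader can size its buffer before calling read() (inotify_fd is a
 * hypothetical descriptor):
 *
 *	int pending;
 *	ioctl(inotify_fd, FIONREAD, &pending);
 *	// pending now holds the number of queued event bytes
 */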

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

static const struct inotify_operations inotify_user_ops = {
	.handle_event	= inotify_dev_queue_event,
	.destroy_watch	= free_inotify_user_watch,
};

asmlinkage long sys_inotify_init1(int flags)
{
	struct inotify_device *dev;
	struct inotify_handle *ih;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	ih = inotify_init(&inotify_user_ops);
	if (IS_ERR(ih)) {
		ret = PTR_ERR(ih);
		goto out_free_dev;
	}
	dev->ih = ih;
	dev->fa = NULL;

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = dev;

	INIT_LIST_HEAD(&dev->events);
	init_waitqueue_head(&dev->wq);
	mutex_init(&dev->ev_mutex);
	mutex_init(&dev->up_mutex);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_dev:
	kfree(dev);
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

asmlinkage long sys_inotify_init(void)
{
	return sys_inotify_init1(0);
}

asmlinkage long sys_inotify_add_watch(int fd, const char __user *pathname, u32 mask)
{
	struct inode *inode;
	struct inotify_device *dev;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = find_inode(pathname, &path, flags);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to path; dev by fget on fd */
	inode = path.dentry->d_inode;
	dev = filp->private_data;

	mutex_lock(&dev->up_mutex);
	ret = inotify_find_update_watch(dev->ih, inode, mask);
	if (ret == -ENOENT)
		ret = create_watch(dev, inode, mask);
	mutex_unlock(&dev->up_mutex);

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;

	/* we free our watch data when we get IN_IGNORED */
	ret = inotify_rm_wd(dev->ih, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}
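
/*
 * End-to-end user-space sketch of the three syscalls above (a minimal,
 * illustrative example; error handling omitted, "/etc" chosen arbitrarily):
 *
 *	int fd = inotify_init1(IN_NONBLOCK);
 *	int wd = inotify_add_watch(fd, "/etc", IN_MODIFY | IN_CREATE);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	// buf holds packed inotify_event records, each
 *	// sizeof(struct inotify_event) + event->len bytes long
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */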

static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot
 * return an error because we have compiled-in VFS hooks.  So an (unlikely)
 * failure here must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_user_watch),
					 0, SLAB_PANIC, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL);

	return 0;
}

module_init(inotify_user_setup);