fsnotify: use a mutex instead of a spinlock to protect a group's mark list
Replaces the group's mark_lock spinlock with a mutex. Using a mutex instead of a spinlock results in more flexibility (i.e. it allows sleeping while the lock is held).

Signed-off-by: Lino Sanfilippo <LinoSanfilippo@gmx.de>
Signed-off-by: Eric Paris <eparis@redhat.com>
commit 986ab09807
parent 6dfbd14994
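The practical win is that code paths holding the lock that protects a group's mark list may now block. Below is a minimal, hypothetical sketch (the demo_* names are made up and do not appear in the patch) of the pattern the switch enables: a sleeping GFP_KERNEL allocation performed while the list lock is held, which would be illegal under the old spinlock.

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>

/* Hypothetical stand-ins for struct fsnotify_group / struct fsnotify_mark. */
struct demo_group {
	struct mutex mark_mutex;	/* protects marks_list, like group->mark_mutex */
	struct list_head marks_list;
};

struct demo_mark {
	struct list_head g_list;
};

static void demo_group_init(struct demo_group *group)
{
	mutex_init(&group->mark_mutex);	/* was: spin_lock_init(&group->mark_lock) */
	INIT_LIST_HEAD(&group->marks_list);
}

static int demo_add_mark(struct demo_group *group)
{
	struct demo_mark *mark;

	mutex_lock(&group->mark_mutex);
	/* Sleeping allocation under the lock: fine with a mutex, forbidden with a spinlock. */
	mark = kzalloc(sizeof(*mark), GFP_KERNEL);
	if (!mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOMEM;
	}
	list_add(&mark->g_list, &group->marks_list);
	mutex_unlock(&group->mark_mutex);
	return 0;
}

There is no direct mutex counterpart of assert_spin_locked(), which is why the patch below replaces those assertions with BUG_ON(!mutex_is_locked(...)) checks.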
@@ -95,7 +95,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
 	init_waitqueue_head(&group->notification_waitq);
 	group->max_events = UINT_MAX;
 
-	spin_lock_init(&group->mark_lock);
+	mutex_init(&group->mark_mutex);
 	INIT_LIST_HEAD(&group->marks_list);
 
 	group->ops = ops;
@@ -63,8 +63,8 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
 {
 	struct inode *inode = mark->i.inode;
 
+	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
 	assert_spin_locked(&mark->lock);
-	assert_spin_locked(&mark->group->mark_lock);
 
 	spin_lock(&inode->i_lock);
 
@@ -191,8 +191,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 
 	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
 
+	BUG_ON(!mutex_is_locked(&group->mark_mutex));
 	assert_spin_locked(&mark->lock);
-	assert_spin_locked(&group->mark_lock);
 
 	spin_lock(&inode->i_lock);
 
@@ -136,13 +136,13 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 	group = mark->group;
 	spin_unlock(&mark->lock);
 
-	spin_lock(&group->mark_lock);
+	mutex_lock(&group->mark_mutex);
 	spin_lock(&mark->lock);
 
 	/* something else already called this function on this mark */
 	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
 		spin_unlock(&mark->lock);
-		spin_unlock(&group->mark_lock);
+		mutex_unlock(&group->mark_mutex);
 		goto put_group;
 	}
 
@@ -159,7 +159,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 	list_del_init(&mark->g_list);
 
 	spin_unlock(&mark->lock);
-	spin_unlock(&group->mark_lock);
+	mutex_unlock(&group->mark_mutex);
 
 	spin_lock(&destroy_lock);
 	list_add(&mark->destroy_list, &destroy_list);
@@ -232,11 +232,11 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 
 	/*
 	 * LOCKING ORDER!!!!
-	 * group->mark_lock
+	 * group->mark_mutex
 	 * mark->lock
 	 * inode->i_lock
 	 */
-	spin_lock(&group->mark_lock);
+	mutex_lock(&group->mark_mutex);
 
 	spin_lock(&mark->lock);
 	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
@@ -263,7 +263,7 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,
 	fsnotify_set_mark_mask_locked(mark, mark->mask);
 	spin_unlock(&mark->lock);
 
-	spin_unlock(&group->mark_lock);
+	mutex_unlock(&group->mark_mutex);
 
 	if (inode)
 		__fsnotify_update_child_dentry_flags(inode);
@@ -277,7 +277,7 @@ err:
 	atomic_dec(&group->num_marks);
 
 	spin_unlock(&mark->lock);
-	spin_unlock(&group->mark_lock);
+	mutex_unlock(&group->mark_mutex);
 
 	spin_lock(&destroy_lock);
 	list_add(&mark->destroy_list, &destroy_list);
@@ -296,7 +296,7 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 	struct fsnotify_mark *lmark, *mark;
 	LIST_HEAD(free_list);
 
-	spin_lock(&group->mark_lock);
+	mutex_lock(&group->mark_mutex);
 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
 		if (mark->flags & flags) {
 			list_add(&mark->free_g_list, &free_list);
@@ -304,7 +304,7 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 			fsnotify_get_mark(mark);
 		}
 	}
-	spin_unlock(&group->mark_lock);
+	mutex_unlock(&group->mark_mutex);
 
 	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
 		fsnotify_destroy_mark(mark);
@@ -88,8 +88,8 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
 {
 	struct vfsmount *mnt = mark->m.mnt;
 
+	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
 	assert_spin_locked(&mark->lock);
-	assert_spin_locked(&mark->group->mark_lock);
 
 	spin_lock(&mnt->mnt_root->d_lock);
 
@@ -151,8 +151,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
 
 	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
 
+	BUG_ON(!mutex_is_locked(&group->mark_mutex));
 	assert_spin_locked(&mark->lock);
-	assert_spin_locked(&group->mark_lock);
 
 	spin_lock(&mnt->mnt_root->d_lock);
 
@@ -141,7 +141,7 @@ struct fsnotify_group {
 	unsigned int priority;
 
 	/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
-	spinlock_t mark_lock;		/* protect marks_list */
+	struct mutex mark_mutex;	/* protect marks_list */
 	atomic_t num_marks;		/* 1 for each mark and 1 for not being
 					 * past the point of no return when freeing
 					 * a group */
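As the updated LOCKING ORDER comment in fsnotify_add_mark() records, group->mark_mutex is taken first, then mark->lock, then inode->i_lock. A brief hypothetical sketch (the demo_* names are made up) of why the mutex must stay outermost: mutex_lock() may sleep, so it cannot be called while a spinlock is held, whereas taking a spinlock under a mutex is fine.

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(demo_mark_mutex);		/* outermost lock: acquiring it may sleep */
static DEFINE_SPINLOCK(demo_mark_lock);		/* inner lock: held only for short, atomic sections */

static void demo_locking_order(void)
{
	mutex_lock(&demo_mark_mutex);	/* 1. sleeping lock first */
	spin_lock(&demo_mark_lock);	/* 2. spinlock nests underneath */

	/* ... manipulate the mark ... */

	spin_unlock(&demo_mark_lock);
	mutex_unlock(&demo_mark_mutex);

	/*
	 * The reverse nesting (mutex_lock() while demo_mark_lock is held)
	 * would be a bug: mutex_lock() may sleep, and sleeping with a
	 * spinlock held is not allowed.
	 */
}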