fs: rename inode_lock to inode_hash_lock
All that remains of the inode_lock is protecting the inode hash list
manipulation and traversals. Rename the inode_lock to inode_hash_lock to
reflect its actual function.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent a66979abad
commit 67a23c4946

fs/inode.c | 115
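The caller-visible effect is a straight rename plus a narrower lock scope: hash insertion and removal now nest inode->i_lock inside inode_hash_lock rather than inside the old global inode_lock. As a minimal sketch, here is __insert_inode_hash() as it reads after the patch, reconstructed from the hunks below (fs/inode.c context such as hash() and inode_hashtable is assumed):

void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	/* inode_hash_lock now guards the hash chains... */
	spin_lock(&inode_hash_lock);
	/* ...and inode->i_lock, guarding i_hash and i_state, nests inside it. */
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}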
fs/inode.c
@@ -39,10 +39,10 @@
  *   sb->s_inodes, inode->i_sb_list
  * inode_wb_list_lock protects:
  *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
+ * inode_hash_lock protects:
+ *   inode_hashtable, inode->i_hash
  *
  * Lock ordering:
- * inode_lock
- *   inode->i_lock
  *
  * inode_sb_list_lock
  *   inode->i_lock
@@ -50,6 +50,13 @@
  *
  * inode_wb_list_lock
  *   inode->i_lock
+ *
+ * inode_hash_lock
+ *   inode_sb_list_lock
+ *   inode->i_lock
+ *
+ * iunique_lock
+ *   inode_hash_lock
  */

 /*
@@ -85,6 +92,8 @@

 static unsigned int i_hash_mask __read_mostly;
 static unsigned int i_hash_shift __read_mostly;
+static struct hlist_head *inode_hashtable __read_mostly;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

 /*
  * Each inode can be on two separate lists. One is
@@ -100,15 +109,6 @@ static unsigned int i_hash_shift __read_mostly;

 static LIST_HEAD(inode_lru);
 static DEFINE_SPINLOCK(inode_lru_lock);
-static struct hlist_head *inode_hashtable __read_mostly;
-
-/*
- * A simple spinlock to protect the list manipulations.
- *
- * NOTE! You also have to own the lock if you change
- * the i_state of an inode while it is in use..
- */
-DEFINE_SPINLOCK(inode_lock);

 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
@@ -433,11 +433,11 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
 {
 	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	spin_lock(&inode->i_lock);
 	hlist_add_head(&inode->i_hash, b);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 }
 EXPORT_SYMBOL(__insert_inode_hash);

@@ -449,11 +449,11 @@ EXPORT_SYMBOL(__insert_inode_hash);
  */
 void remove_inode_hash(struct inode *inode)
 {
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	spin_lock(&inode->i_lock);
 	hlist_del_init(&inode->i_hash);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 }
 EXPORT_SYMBOL(remove_inode_hash);

@@ -778,11 +778,15 @@ static struct inode *find_inode(struct super_block *sb,

 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_sb != sb)
-			continue;
-		if (!test(inode, data))
-			continue;
 		spin_lock(&inode->i_lock);
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+		if (!test(inode, data)) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
@@ -806,11 +810,15 @@ static struct inode *find_inode_fast(struct super_block *sb,

 repeat:
 	hlist_for_each_entry(inode, node, head, i_hash) {
-		if (inode->i_ino != ino)
-			continue;
-		if (inode->i_sb != sb)
-			continue;
 		spin_lock(&inode->i_lock);
+		if (inode->i_ino != ino) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+		if (inode->i_sb != sb) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
 		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
 			__wait_on_freeing_inode(inode);
 			goto repeat;
@@ -924,7 +932,7 @@ void unlock_new_inode(struct inode *inode)
 EXPORT_SYMBOL(unlock_new_inode);

 /*
- * This is called without the inode lock held.. Be careful.
+ * This is called without the inode hash lock held.. Be careful.
  *
  * We no longer cache the sb_flags in i_flags - see fs.h
  *	-- rmk@arm.uk.linux.org
@@ -941,7 +949,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 	if (inode) {
 		struct inode *old;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode(sb, head, test, data);
 		if (!old) {
@@ -953,7 +961,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
 			inode_sb_list_add(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);

 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
@@ -966,7 +974,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
@@ -974,7 +982,7 @@ static struct inode *get_new_inode(struct super_block *sb,
 	return inode;

 set_failed:
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	destroy_inode(inode);
 	return NULL;
 }
@@ -992,7 +1000,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 	if (inode) {
 		struct inode *old;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		/* We released the lock, so.. */
 		old = find_inode_fast(sb, head, ino);
 		if (!old) {
@@ -1002,7 +1010,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
 			inode_sb_list_add(inode);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);

 			/* Return the locked inode with I_NEW set, the
 			 * caller is responsible for filling in the contents
@@ -1015,7 +1023,7 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
 		 * us. Use the old inode instead of the one we just
 		 * allocated.
 		 */
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
@@ -1036,10 +1044,14 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino)
 	struct hlist_node *node;
 	struct inode *inode;

+	spin_lock(&inode_hash_lock);
 	hlist_for_each_entry(inode, node, b, i_hash) {
-		if (inode->i_ino == ino && inode->i_sb == sb)
+		if (inode->i_ino == ino && inode->i_sb == sb) {
+			spin_unlock(&inode_hash_lock);
 			return 0;
+		}
 	}
+	spin_unlock(&inode_hash_lock);

 	return 1;
 }
@@ -1069,7 +1081,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
 	static unsigned int counter;
 	ino_t res;

-	spin_lock(&inode_lock);
 	spin_lock(&iunique_lock);
 	do {
 		if (counter <= max_reserved)
@@ -1077,7 +1088,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
 		res = counter++;
 	} while (!test_inode_iunique(sb, res));
 	spin_unlock(&iunique_lock);
-	spin_unlock(&inode_lock);

 	return res;
 }
@@ -1119,7 +1129,7 @@ EXPORT_SYMBOL(igrab);
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 static struct inode *ifind(struct super_block *sb,
 		struct hlist_head *head, int (*test)(struct inode *, void *),
@@ -1127,15 +1137,15 @@ static struct inode *ifind(struct super_block *sb,
 {
 	struct inode *inode;

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	inode = find_inode(sb, head, test, data);
 	if (inode) {
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		if (likely(wait))
 			wait_on_inode(inode);
 		return inode;
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	return NULL;
 }

@@ -1159,14 +1169,14 @@ static struct inode *ifind_fast(struct super_block *sb,
 {
 	struct inode *inode;

-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 	inode = find_inode_fast(sb, head, ino);
 	if (inode) {
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(inode);
 		return inode;
 	}
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	return NULL;
 }

@@ -1189,7 +1199,7 @@ static struct inode *ifind_fast(struct super_block *sb,
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
@@ -1217,7 +1227,7 @@ EXPORT_SYMBOL(ilookup5_nowait);
  *
  * Otherwise NULL is returned.
  *
- * Note, @test is called with the inode_lock held, so can't sleep.
+ * Note, @test is called with the inode_hash_lock held, so can't sleep.
  */
 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
@@ -1268,7 +1278,8 @@ EXPORT_SYMBOL(ilookup);
  * inode and this is returned locked, hashed, and with the I_NEW flag set. The
  * file system gets to fill it in before unlocking it via unlock_new_inode().
  *
- * Note both @test and @set are called with the inode_lock held, so can't sleep.
+ * Note both @test and @set are called with the inode_hash_lock held, so can't
+ * sleep.
  */
 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *),
@@ -1328,7 +1339,7 @@ int insert_inode_locked(struct inode *inode)
 	while (1) {
 		struct hlist_node *node;
 		struct inode *old = NULL;
-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		hlist_for_each_entry(old, node, head, i_hash) {
 			if (old->i_ino != ino)
 				continue;
@@ -1346,12 +1357,12 @@ int insert_inode_locked(struct inode *inode)
 			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
 		__iget(old);
 		spin_unlock(&old->i_lock);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(old);
 		if (unlikely(!inode_unhashed(old))) {
 			iput(old);
@@ -1372,7 +1383,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 		struct hlist_node *node;
 		struct inode *old = NULL;

-		spin_lock(&inode_lock);
+		spin_lock(&inode_hash_lock);
 		hlist_for_each_entry(old, node, head, i_hash) {
 			if (old->i_sb != sb)
 				continue;
@@ -1390,12 +1401,12 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
 			inode->i_state |= I_NEW;
 			hlist_add_head(&inode->i_hash, head);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lock);
+			spin_unlock(&inode_hash_lock);
 			return 0;
 		}
 		__iget(old);
 		spin_unlock(&old->i_lock);
-		spin_unlock(&inode_lock);
+		spin_unlock(&inode_hash_lock);
 		wait_on_inode(old);
 		if (unlikely(!inode_unhashed(old))) {
 			iput(old);
@@ -1674,10 +1685,10 @@ static void __wait_on_freeing_inode(struct inode *inode)
 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
 	spin_unlock(&inode->i_lock);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode_hash_lock);
 	schedule();
 	finish_wait(wq, &wait.wait);
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 }

 static __initdata unsigned long ihash_entries;
@@ -22,7 +22,6 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */

 #include <asm/atomic.h>

@@ -91,7 +91,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/srcu.h>
-#include <linux/writeback.h> /* for inode_lock */

 #include <asm/atomic.h>

@@ -23,7 +23,6 @@
 #include <linux/mount.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
-#include <linux/writeback.h> /* for inode_lock */

 #include <asm/atomic.h>

@@ -54,7 +54,7 @@
  *
  * Return 1 if the attributes match and 0 if not.
  *
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
  * allowed to sleep.
  */
 int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
@@ -98,7 +98,7 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na)
  *
  * Return 0 on success and -errno on error.
  *
- * NOTE: This function runs with the inode_lock spin lock held so it is not
+ * NOTE: This function runs with the inode->i_lock spin lock held so it is not
  * allowed to sleep. (Hence the GFP_ATOMIC allocation.)
  */
 static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
@@ -9,7 +9,6 @@

 struct backing_dev_info;

-extern spinlock_t inode_lock;
 extern spinlock_t inode_wb_list_lock;

 /*
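Read together, the hunks also establish the lock ordering that the reworked header comment documents: a hash walk takes inode_hash_lock first and only then each candidate's inode->i_lock, while iunique() keeps its private iunique_lock outermost and lets test_inode_iunique() take inode_hash_lock inside it. A condensed sketch of that nesting, paraphrased from the post-patch find_inode_fast() and iunique() paths above (simplified; the wait/retry and error handling are omitted):

	/* Lookup path: inode_hash_lock, then the candidate's i_lock. */
	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		/* i_ino/i_sb checks, then __iget() or unlock-and-continue */
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&inode_hash_lock);

	/* iunique(): iunique_lock outermost, inode_hash_lock taken inside
	 * test_inode_iunique(), matching the new "iunique_lock ->
	 * inode_hash_lock" entry in the ordering comment. */
	spin_lock(&iunique_lock);
	do {
		res = counter++;
	} while (!test_inode_iunique(sb, res));	/* takes/drops inode_hash_lock */
	spin_unlock(&iunique_lock);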