[GFS2] Speed up scanning of glocks
I noticed that gfs2_scand seemed to be taking a lot of CPU, so in order to cut that down a bit, here is a patch. Firstly, the type of a glock is a constant during its lifetime, so it's possible to check this without needing locking. I've moved the (common) case of testing for an inode glock outside of the glmutex lock. Also, there was a mutex left over from when the glock cache was master of the inode cache. That isn't required any more, so I've removed that too. There is probably scope for further speed-ups in this area in the future. Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
This commit is contained in:
parent
166afccd71
commit
a2242db090
|
@@ -150,12 +150,9 @@ static void kill_glock(struct kref *kref)
 int gfs2_glock_put(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
 	int rv = 0;
 
-	mutex_lock(&sdp->sd_invalidate_inodes_mutex);
-
 	write_lock(&bucket->hb_lock);
 	if (kref_put(&gl->gl_ref, kill_glock)) {
 		list_del_init(&gl->gl_list);
@@ -166,8 +163,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 		goto out;
 	}
 	write_unlock(&bucket->hb_lock);
-out:
-	mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
 out:
 	return rv;
 }
 
@@ -1964,19 +1960,18 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 
 static void scan_glock(struct gfs2_glock *gl)
 {
+	if (gl->gl_ops == &gfs2_inode_glops)
+		goto out;
+
 	if (gfs2_glmutex_trylock(gl)) {
-		if (gl->gl_ops == &gfs2_inode_glops)
-			goto out;
 		if (queue_empty(gl, &gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED &&
 		    demote_ok(gl))
 			goto out_schedule;
-out:
 		gfs2_glmutex_unlock(gl);
 	}
-
+out:
 	gfs2_glock_put(gl);
-
 	return;
 
 out_schedule:
@@ -2070,16 +2065,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
 			t = jiffies;
 		}
 
-		/* invalidate_inodes() requires that the sb inodes list
-		   not change, but an async completion callback for an
-		   unlock can occur which does glock_put() which
-		   can call iput() which will change the sb inodes list.
-		   invalidate_inodes_mutex prevents glock_put()'s during
-		   an invalidate_inodes() */
-
-		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
 		invalidate_inodes(sdp->sd_vfs);
-		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
 		msleep(10);
 	}
 }
@@ -507,7 +507,6 @@ struct gfs2_sbd {
 	struct gfs2_holder sd_live_gh;
 	struct gfs2_glock *sd_rename_gl;
 	struct gfs2_glock *sd_trans_gl;
-	struct mutex sd_invalidate_inodes_mutex;
 
 	/* Inode Stuff */
 
@@ -63,7 +63,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	INIT_LIST_HEAD(&sdp->sd_reclaim_list);
 	spin_lock_init(&sdp->sd_reclaim_lock);
 	init_waitqueue_head(&sdp->sd_reclaim_wq);
-	mutex_init(&sdp->sd_invalidate_inodes_mutex);
 
 	mutex_init(&sdp->sd_inum_mutex);
 	spin_lock_init(&sdp->sd_statfs_spin);
Loading…
Reference in New Issue