GFS2: Extend umount wait coverage to full glock lifetime
Although all glocks are, by the time of the umount glock wait, scheduled for demotion, some of them haven't made it far enough through the process for the original set of waiting code to wait for them. This extends the ref count to the whole glock lifetime in order to ensure that the waiting does catch all glocks. It does make it a bit more invasive, but it seems the only sensible solution at the moment.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
commit 8f05228ee7
parent e402746a94
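The scheme these hunks implement is a single disposal count that spans each glock's entire lifetime: gfs2_glock_get() bumps sd_glock_disposal as soon as a glock is created, the lock module's put_lock callback drops it when the glock is finally freed, and gfs2_gl_hash_clear() sleeps on sd_glock_wait until the count reaches zero. Below is a minimal userspace C analogue of that pattern, a sketch only: it uses pthreads in place of the kernel's atomic_inc()/atomic_dec_and_test() and wait_event()/wake_up(), and all names in it are illustrative rather than the kernel's.

#include <pthread.h>
#include <stdlib.h>

static int disposal;	/* counts glocks created but not yet freed */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_freed = PTHREAD_COND_INITIALIZER;

struct glock { int dummy; };

/* Creation side: the count covers the glock from the moment it exists,
 * mirroring atomic_inc(&sdp->sd_glock_disposal) in gfs2_glock_get(). */
static struct glock *glock_get(void)
{
	struct glock *gl = malloc(sizeof(*gl));
	if (!gl)
		return NULL;
	pthread_mutex_lock(&lock);
	disposal++;
	pthread_mutex_unlock(&lock);
	return gl;
}

/* Final free: drop the count and wake any waiter on zero, mirroring
 * atomic_dec_and_test() + wake_up() in the put_lock callbacks. */
static void glock_put_final(struct glock *gl)
{
	free(gl);
	pthread_mutex_lock(&lock);
	if (--disposal == 0)
		pthread_cond_broadcast(&all_freed);
	pthread_mutex_unlock(&lock);
}

/* Umount side: block until every glock has been freed, mirroring
 * wait_event(sdp->sd_glock_wait, atomic_read(...) == 0). */
static void umount_wait(void)
{
	pthread_mutex_lock(&lock);
	while (disposal != 0)
		pthread_cond_wait(&all_freed, &lock);
	pthread_mutex_unlock(&lock);
}

Because the count is taken at creation rather than at demotion, a waiter cannot miss a glock that has been scheduled for demotion but has not yet reached the point the old waiting code watched for.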
@@ -769,6 +769,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (!gl)
 		return -ENOMEM;
 
+	atomic_inc(&sdp->sd_glock_disposal);
 	gl->gl_flags = 0;
 	gl->gl_name = name;
 	atomic_set(&gl->gl_ref, 1);
@@ -1538,6 +1539,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 		up_write(&gfs2_umount_flush_sem);
 		msleep(10);
 	}
+	flush_workqueue(glock_workqueue);
+	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
+	gfs2_dump_lockstate(sdp);
 }
 
 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
@@ -123,7 +123,7 @@ struct lm_lockops {
 	int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
 	void (*lm_unmount) (struct gfs2_sbd *sdp);
 	void (*lm_withdraw) (struct gfs2_sbd *sdp);
-	void (*lm_put_lock) (struct kmem_cache *cachep, void *gl);
+	void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
 	unsigned int (*lm_lock) (struct gfs2_glock *gl,
 				 unsigned int req_state, unsigned int flags);
 	void (*lm_cancel) (struct gfs2_glock *gl);
@@ -167,15 +167,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl,
 	return LM_OUT_ASYNC;
 }
 
-static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr)
+static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
 {
-	struct gfs2_glock *gl = ptr;
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	int error;
 
 	if (gl->gl_lksb.sb_lkid == 0) {
 		kmem_cache_free(cachep, gl);
+		if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+			wake_up(&sdp->sd_glock_wait);
 		return;
 	}
 
@@ -187,7 +188,6 @@ static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr)
 		       (unsigned long long)gl->gl_name.ln_number, error);
 		return;
 	}
-	atomic_inc(&sdp->sd_glock_disposal);
 }
 
 static void gdlm_cancel(struct gfs2_glock *gl)
@@ -985,9 +985,17 @@ static const match_table_t nolock_tokens = {
 	{ Opt_err, NULL },
 };
 
+static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	kmem_cache_free(cachep, gl);
+	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+		wake_up(&sdp->sd_glock_wait);
+}
+
 static const struct lm_lockops nolock_ops = {
 	.lm_proto_name = "lock_nolock",
-	.lm_put_lock = kmem_cache_free,
+	.lm_put_lock = nolock_put_lock,
 	.lm_tokens = &nolock_tokens,
 };
 
@@ -861,8 +861,6 @@ restart:
 	gfs2_jindex_free(sdp);
 	/* Take apart glock structures and buffer lists */
 	gfs2_gl_hash_clear(sdp);
-	/* Wait for dlm to reply to all our unlock requests */
-	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
 	/* Unmount the locking protocol */
 	gfs2_lm_unmount(sdp);
 
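Taken together, the hunks move all of the waiting into gfs2_gl_hash_clear(), which is why the separate wait_event() in the unmount path above becomes redundant. A rough sketch of the resulting ordering (gfs2_put_super_tail is a hypothetical helper name; the two calls and their order come from the hunk above):

/* Hypothetical helper showing the unmount tail after this patch:
 * gfs2_gl_hash_clear() now flushes the glock workqueue and waits on
 * sd_glock_wait itself, so nothing else needs to wait before the
 * locking protocol is unmounted. */
static void gfs2_put_super_tail(struct gfs2_sbd *sdp)
{
	gfs2_gl_hash_clear(sdp);	/* demote, free and wait for every glock */
	gfs2_lm_unmount(sdp);		/* safe: no unlock replies outstanding */
}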