gfs2: Get rid of GLF_PENDING_DELETE flag

Get rid of the GLF_PENDING_DELETE glock flag introduced by commit
a0e3cc65fa ("gfs2: Turn gl_delete into a delayed work").  The only use
of that flag is to prevent the iopen glock from being demoted (i.e.,
unlocked) while delete work is pending.  It turns out that demoting the
iopen glock while delete work is pending is perfectly fine; we only need
to make sure that the glock isn't being freed while still in use.  This
is ensured by the previous patch because delete_work_func() owns a
reference while the work is queued or running.
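
For illustration, a minimal sketch of that reference-ownership pattern
(not part of the patch; example_queue_delete_work() is a hypothetical
name, and gfs2_queue_delete_work() in the diff below is the real
interface):

/* Sketch: the caller takes a glock reference before queueing.  If the
 * work was already queued, queue_delayed_work() returns false and the
 * extra reference is dropped again.  Otherwise the reference is owned
 * by the queued work and released by delete_work_func() when it
 * completes, so the glock cannot be freed while delete work is queued
 * or running, even if the iopen glock is demoted in the meantime. */
static bool example_queue_delete_work(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	if (queue_delayed_work(gfs2_delete_workqueue, &gl->gl_delete, 0))
		return true;
	gfs2_glock_put(gl);
	return false;
}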

With these changes, gfs2_queue_delete_work() no longer takes the glock
spin lock, so we can use it in iopen_go_callback() (which runs with
that spin lock held) instead of open-coding the queueing there.
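
As context, a hedged sketch of the calling side, simplified from
gfs2_glock_cb() in glock.c (surrounding code assumed from that era of
the tree, not part of this patch): ->go_callback(), i.e.
iopen_go_callback(), is invoked with gl->gl_lockref.lock already held,
so the old lock-taking gfs2_queue_delete_work() could not be called
from there:

/* Simplified: handle_callback() invokes ->go_callback() while the
 * lockref spin lock is held, so any helper called from the callback
 * must not take gl->gl_lockref.lock itself. */
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;

	gfs2_glock_hold(gl);
	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}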

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
commit 3056dc4655
parent 228804a35c
Author: Andreas Gruenbacher <agruenba@redhat.com>
Date:   2022-12-09 18:28:20 +01:00

4 files changed, 5 insertions(+), 34 deletions(-)

--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -984,10 +984,6 @@ static void delete_work_func(struct work_struct *work)
 	struct inode *inode;
 	u64 no_addr = gl->gl_name.ln_number;
 
-	spin_lock(&gl->gl_lockref.lock);
-	clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
-	spin_unlock(&gl->gl_lockref.lock);
-
 	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
 		/*
 		 * If we can evict the inode, give the remote node trying to
@@ -2064,28 +2060,14 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 
 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
 {
-	bool queued;
-
-	spin_lock(&gl->gl_lockref.lock);
-	queued = queue_delayed_work(gfs2_delete_workqueue,
-				    &gl->gl_delete, delay);
-	if (queued)
-		set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
-	spin_unlock(&gl->gl_lockref.lock);
-	return queued;
+	return queue_delayed_work(gfs2_delete_workqueue,
+				  &gl->gl_delete, delay);
 }
 
 void gfs2_cancel_delete_work(struct gfs2_glock *gl)
 {
-	if (cancel_delayed_work(&gl->gl_delete)) {
-		clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+	if (cancel_delayed_work(&gl->gl_delete))
 		gfs2_glock_put(gl);
-	}
 }
-
-bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
-{
-	return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
-}
 
 static void flush_delete_work(struct gfs2_glock *gl)
@@ -2307,8 +2289,6 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
 		*p++ = 'o';
 	if (test_bit(GLF_BLOCKING, gflags))
 		*p++ = 'b';
-	if (test_bit(GLF_PENDING_DELETE, gflags))
-		*p++ = 'P';
 	if (test_bit(GLF_FREEING, gflags))
 		*p++ = 'x';
 	if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))

--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -270,7 +270,6 @@ extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
 extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
 extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
-extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
 extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 extern void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);

--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -651,17 +651,11 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
 	    gl->gl_state == LM_ST_SHARED && ip) {
 		gl->gl_lockref.count++;
-		if (!queue_delayed_work(gfs2_delete_workqueue,
-					&gl->gl_delete, 0))
+		if (!gfs2_queue_delete_work(gl, 0))
 			gl->gl_lockref.count--;
 	}
 }
 
-static int iopen_go_demote_ok(const struct gfs2_glock *gl)
-{
-	return !gfs2_delete_work_queued(gl);
-}
-
 /**
  * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
  * @gl: glock being freed
@@ -767,7 +761,6 @@ const struct gfs2_glock_operations gfs2_iopen_glops = {
 	.go_type = LM_TYPE_IOPEN,
 	.go_callback = iopen_go_callback,
 	.go_dump = inode_go_dump,
-	.go_demote_ok = iopen_go_demote_ok,
 	.go_flags = GLOF_LRU | GLOF_NONDISK,
 	.go_subclass = 1,
 };

--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -329,8 +329,7 @@ enum {
 	GLF_LRU				= 13,
 	GLF_OBJECT			= 14, /* Used only for tracing */
 	GLF_BLOCKING			= 15,
-	GLF_PENDING_DELETE		= 17,
-	GLF_FREEING			= 18, /* Wait for glock to be freed */
+	GLF_FREEING			= 16, /* Wait for glock to be freed */
 };
 
 struct gfs2_glock {