[jffs2] kill wbuf_queued/wbuf_dwork_lock
schedule_delayed_work() happening when the work is already pending is a cheap no-op. Don't bother with ->wbuf_queued logic - it's both broken (cancelling ->wbuf_dwork leaves it set, as spotted by Jeff Harris) and pointless. It's cheaper to let schedule_delayed_work() handle that case. Reported-by: Jeff Harris <jefftharris@gmail.com> Tested-by: Jeff Harris <jefftharris@gmail.com> Cc: stable@vger.kernel.org Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
parent
4e07ad6406
commit
99358a1ca5
|
@ -134,8 +134,6 @@ struct jffs2_sb_info {
|
|||
struct rw_semaphore wbuf_sem; /* Protects the write buffer */
|
||||
|
||||
struct delayed_work wbuf_dwork; /* write-buffer write-out work */
|
||||
int wbuf_queued; /* non-zero delayed work is queued */
|
||||
spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and wbuf_queued */
|
||||
|
||||
unsigned char *oobbuf;
|
||||
int oobavail; /* How many bytes are available for JFFS2 in OOB */
|
||||
|
|
|
@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work)
|
|||
struct jffs2_sb_info *c = work_to_sb(work);
|
||||
struct super_block *sb = OFNI_BS_2SFFJ(c);
|
||||
|
||||
spin_lock(&c->wbuf_dwork_lock);
|
||||
c->wbuf_queued = 0;
|
||||
spin_unlock(&c->wbuf_dwork_lock);
|
||||
|
||||
if (!(sb->s_flags & MS_RDONLY)) {
|
||||
jffs2_dbg(1, "%s()\n", __func__);
|
||||
jffs2_flush_wbuf_gc(c, 0);
|
||||
|
@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
|
|||
if (sb->s_flags & MS_RDONLY)
|
||||
return;
|
||||
|
||||
spin_lock(&c->wbuf_dwork_lock);
|
||||
if (!c->wbuf_queued) {
|
||||
delay = msecs_to_jiffies(dirty_writeback_interval * 10);
|
||||
if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
|
||||
jffs2_dbg(1, "%s()\n", __func__);
|
||||
delay = msecs_to_jiffies(dirty_writeback_interval * 10);
|
||||
queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
|
||||
c->wbuf_queued = 1;
|
||||
}
|
||||
spin_unlock(&c->wbuf_dwork_lock);
|
||||
}
|
||||
|
||||
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
|
||||
|
@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
|
|||
|
||||
/* Initialise write buffer */
|
||||
init_rwsem(&c->wbuf_sem);
|
||||
spin_lock_init(&c->wbuf_dwork_lock);
|
||||
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
||||
c->wbuf_pagesize = c->mtd->writesize;
|
||||
c->wbuf_ofs = 0xFFFFFFFF;
|
||||
|
@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
|
|||
|
||||
/* Initialize write buffer */
|
||||
init_rwsem(&c->wbuf_sem);
|
||||
spin_lock_init(&c->wbuf_dwork_lock);
|
||||
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
||||
c->wbuf_pagesize = c->mtd->erasesize;
|
||||
|
||||
|
@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
|
|||
|
||||
/* Initialize write buffer */
|
||||
init_rwsem(&c->wbuf_sem);
|
||||
spin_lock_init(&c->wbuf_dwork_lock);
|
||||
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
||||
|
||||
c->wbuf_pagesize = c->mtd->writesize;
|
||||
|
@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
|
|||
return 0;
|
||||
|
||||
init_rwsem(&c->wbuf_sem);
|
||||
spin_lock_init(&c->wbuf_dwork_lock);
|
||||
INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
|
||||
|
||||
c->wbuf_pagesize = c->mtd->writesize;
|
||||
|
|
Loading…
Reference in New Issue