[GFS2] Use mutices rather than semaphores
As well as a number of minor bug fixes, this patch changes GFS2 to use mutexes rather than semaphores. This results in better diagnostic information if there are any locking problems.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
This commit is contained in:
parent 5c4e9e0366
commit f55ab26a8f
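Every hunk below applies the same mechanical conversion: a struct semaphore field used as a binary mutex becomes a struct mutex, init_MUTEX() becomes mutex_init(), and down()/up() become mutex_lock()/mutex_unlock(). A minimal sketch of that pattern, using a hypothetical demo structure rather than code from this patch:

#include <linux/mutex.h>

/* Hypothetical illustration of the semaphore-to-mutex conversion; the patch
 * applies the same change to fields such as sd_inum_mutex, sd_freeze_lock
 * and rd_mutex. */
struct demo {
	struct mutex lock;	/* was: struct semaphore lock; */
	int counter;
};

static void demo_init(struct demo *d)
{
	mutex_init(&d->lock);	/* was: init_MUTEX(&d->lock); */
	d->counter = 0;
}

static void demo_bump(struct demo *d)
{
	mutex_lock(&d->lock);	/* was: down(&d->lock); */
	d->counter++;
	mutex_unlock(&d->lock);	/* was: up(&d->lock); */
}

The "better information" the commit message refers to is presumably the extra debugging that struct mutex enables (owner tracking and the checks behind CONFIG_DEBUG_MUTEXES), which a plain counting semaphore cannot provide.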
@@ -149,7 +149,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
 	int rv = 0;
 
-	down(&sdp->sd_invalidate_inodes_mutex);
+	mutex_lock(&sdp->sd_invalidate_inodes_mutex);
 
 	write_lock(&bucket->hb_lock);
 	if (kref_put(&gl->gl_ref, kill_glock)) {
@@ -161,7 +161,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	}
 	write_unlock(&bucket->hb_lock);
  out:
-	up(&sdp->sd_invalidate_inodes_mutex);
+	mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
 	return rv;
 }
 
@@ -2312,9 +2312,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
 		   invalidate_inodes_mutex prevents glock_put()'s during
 		   an invalidate_inodes() */
 
-		down(&sdp->sd_invalidate_inodes_mutex);
+		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
 		invalidate_inodes(sdp->sd_vfs);
-		up(&sdp->sd_invalidate_inodes_mutex);
+		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
 		yield();
 	}
 }

@@ -88,7 +88,7 @@ struct gfs2_rgrpd {
 	uint64_t rd_rg_vn;
 	struct gfs2_bitmap *rd_bits;
 	unsigned int rd_bh_count;
-	struct semaphore rd_mutex;
+	struct mutex rd_mutex;
 	uint32_t rd_free_clone;
 	struct gfs2_log_element rd_le;
 	uint32_t rd_last_alloc_data;
@@ -277,7 +277,7 @@ enum {
 struct gfs2_file {
 	unsigned long f_flags;		/* GFF_... */
 
-	struct semaphore f_fl_mutex;
+	struct mutex f_fl_mutex;
 	struct gfs2_holder f_fl_gh;
 
 	struct gfs2_inode *f_inode;
@@ -510,7 +510,7 @@ struct gfs2_sbd {
 	struct gfs2_holder sd_live_gh;
 	struct gfs2_glock *sd_rename_gl;
 	struct gfs2_glock *sd_trans_gl;
-	struct semaphore sd_invalidate_inodes_mutex;
+	struct mutex sd_invalidate_inodes_mutex;
 
 	/* Inode Stuff */
 
@@ -528,12 +528,12 @@ struct gfs2_sbd {
 
 	/* Inum stuff */
 
-	struct semaphore sd_inum_mutex;
+	struct mutex sd_inum_mutex;
 
 	/* StatFS stuff */
 
 	spinlock_t sd_statfs_spin;
-	struct semaphore sd_statfs_mutex;
+	struct mutex sd_statfs_mutex;
 	struct gfs2_statfs_change sd_statfs_master;
 	struct gfs2_statfs_change sd_statfs_local;
 	unsigned long sd_statfs_sync_time;
@@ -542,7 +542,7 @@ struct gfs2_sbd {
 
 	uint64_t sd_rindex_vn;
 	spinlock_t sd_rindex_spin;
-	struct semaphore sd_rindex_mutex;
+	struct mutex sd_rindex_mutex;
 	struct list_head sd_rindex_list;
 	struct list_head sd_rindex_mru_list;
 	struct list_head sd_rindex_recent_list;
@@ -553,7 +553,7 @@ struct gfs2_sbd {
 
 	struct list_head sd_jindex_list;
 	spinlock_t sd_jindex_spin;
-	struct semaphore sd_jindex_mutex;
+	struct mutex sd_jindex_mutex;
 	unsigned int sd_journals;
 	unsigned long sd_jindex_refresh_time;
 
@@ -581,7 +581,7 @@ struct gfs2_sbd {
 	struct list_head sd_unlinked_list;
 	atomic_t sd_unlinked_count;
 	spinlock_t sd_unlinked_spin;
-	struct semaphore sd_unlinked_mutex;
+	struct mutex sd_unlinked_mutex;
 
 	unsigned int sd_unlinked_slots;
 	unsigned int sd_unlinked_chunks;
@@ -592,7 +592,7 @@ struct gfs2_sbd {
 	struct list_head sd_quota_list;
 	atomic_t sd_quota_count;
 	spinlock_t sd_quota_spin;
-	struct semaphore sd_quota_mutex;
+	struct mutex sd_quota_mutex;
 
 	unsigned int sd_quota_slots;
 	unsigned int sd_quota_chunks;
@@ -637,7 +637,7 @@ struct gfs2_sbd {
 	int sd_log_idle;
 
 	unsigned long sd_log_flush_time;
-	struct semaphore sd_log_flush_lock;
+	struct mutex sd_log_flush_lock;
 	struct list_head sd_log_flush_list;
 
 	unsigned int sd_log_flush_head;
@@ -659,7 +659,7 @@ struct gfs2_sbd {
 	/* For quiescing the filesystem */
 
 	struct gfs2_holder sd_freeze_gh;
-	struct semaphore sd_freeze_lock;
+	struct mutex sd_freeze_lock;
 	unsigned int sd_freeze_count;
 
 	/* Counters */

@@ -782,11 +782,11 @@ static int pick_formal_ino_1(struct gfs2_sbd *sdp, uint64_t *formal_ino)
 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 	if (error)
 		return error;
-	down(&sdp->sd_inum_mutex);
+	mutex_lock(&sdp->sd_inum_mutex);
 
 	error = gfs2_meta_inode_buffer(ip, &bh);
 	if (error) {
-		up(&sdp->sd_inum_mutex);
+		mutex_unlock(&sdp->sd_inum_mutex);
 		gfs2_trans_end(sdp);
 		return error;
 	}
@@ -800,14 +800,14 @@ static int pick_formal_ino_1(struct gfs2_sbd *sdp, uint64_t *formal_ino)
 		gfs2_inum_range_out(&ir,
 				    bh->b_data + sizeof(struct gfs2_dinode));
 		brelse(bh);
-		up(&sdp->sd_inum_mutex);
+		mutex_unlock(&sdp->sd_inum_mutex);
 		gfs2_trans_end(sdp);
 		return 0;
 	}
 
 	brelse(bh);
 
-	up(&sdp->sd_inum_mutex);
+	mutex_unlock(&sdp->sd_inum_mutex);
 	gfs2_trans_end(sdp);
 
 	return 1;
@@ -829,7 +829,7 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, uint64_t *formal_ino)
 	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
 	if (error)
 		goto out;
-	down(&sdp->sd_inum_mutex);
+	mutex_lock(&sdp->sd_inum_mutex);
 
 	error = gfs2_meta_inode_buffer(ip, &bh);
 	if (error)
@@ -869,7 +869,7 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, uint64_t *formal_ino)
 	brelse(bh);
 
  out_end_trans:
-	up(&sdp->sd_inum_mutex);
+	mutex_unlock(&sdp->sd_inum_mutex);
 	gfs2_trans_end(sdp);
 
  out:

@@ -23,29 +23,16 @@
 
 #define PULL 1
 
-static inline int is_done(struct gfs2_sbd *sdp, atomic_t *a)
-{
-	int done;
-	gfs2_log_lock(sdp);
-	done = atomic_read(a) ? 0 : 1;
-	gfs2_log_unlock(sdp);
-	return done;
-}
-
 static void do_lock_wait(struct gfs2_sbd *sdp, wait_queue_head_t *wq,
 			 atomic_t *a)
 {
-	gfs2_log_unlock(sdp);
-	wait_event(*wq, is_done(sdp, a));
-	gfs2_log_lock(sdp);
+	wait_event(*wq, atomic_read(a) ? 0 : 1);
 }
 
 static void lock_for_trans(struct gfs2_sbd *sdp)
 {
-	gfs2_log_lock(sdp);
 	do_lock_wait(sdp, &sdp->sd_log_trans_wq, &sdp->sd_log_flush_count);
 	atomic_inc(&sdp->sd_log_trans_count);
-	gfs2_log_unlock(sdp);
 }
 
 static void unlock_from_trans(struct gfs2_sbd *sdp)
@@ -55,15 +42,13 @@ static void unlock_from_trans(struct gfs2_sbd *sdp)
 		wake_up(&sdp->sd_log_flush_wq);
 }
 
-void gfs2_lock_for_flush(struct gfs2_sbd *sdp)
+static void gfs2_lock_for_flush(struct gfs2_sbd *sdp)
 {
-	gfs2_log_lock(sdp);
 	atomic_inc(&sdp->sd_log_flush_count);
 	do_lock_wait(sdp, &sdp->sd_log_flush_wq, &sdp->sd_log_trans_count);
-	gfs2_log_unlock(sdp);
 }
 
-void gfs2_unlock_from_flush(struct gfs2_sbd *sdp)
+static void gfs2_unlock_from_flush(struct gfs2_sbd *sdp)
 {
 	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_flush_count));
 	if (atomic_dec_and_test(&sdp->sd_log_flush_count))
@@ -209,7 +194,6 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 
 	for (;;) {
 		gfs2_log_lock(sdp);
-
 		if (list_empty(&list)) {
 			list_add_tail(&list, &sdp->sd_log_blks_list);
 			while (sdp->sd_log_blks_list.next != &list) {
@@ -225,7 +209,6 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 				set_current_state(TASK_RUNNING);
 			}
 		}
-
 		/* Never give away the last block so we can
 		   always pull the tail if we need to. */
 		if (sdp->sd_log_blks_free > blks) {
@@ -237,14 +220,12 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 		}
 
 		gfs2_log_unlock(sdp);
-
 		gfs2_ail1_empty(sdp, 0);
 		gfs2_log_flush(sdp);
 
 		if (try++)
 			gfs2_ail1_start(sdp, 0);
 	}
 
 	lock_for_trans(sdp);
-
 	return 0;
@@ -512,22 +493,26 @@ void gfs2_log_flush_i(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
 	INIT_LIST_HEAD(&ai->ai_ail1_list);
 	INIT_LIST_HEAD(&ai->ai_ail2_list);
 
 	gfs2_lock_for_flush(sdp);
-	down(&sdp->sd_log_flush_lock);
+
+	if (gl) {
+		gfs2_log_lock(sdp);
+		if (list_empty(&gl->gl_le.le_list)) {
+			gfs2_log_unlock(sdp);
+			gfs2_unlock_from_flush(sdp);
+			kfree(ai);
+			return;
+		}
+		gfs2_log_unlock(sdp);
+	}
+
+	mutex_lock(&sdp->sd_log_flush_lock);
 
 	gfs2_assert_withdraw(sdp,
 			sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
 	gfs2_assert_withdraw(sdp,
 			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
-
-	if (gl && list_empty(&gl->gl_le.le_list)) {
-		up(&sdp->sd_log_flush_lock);
-		gfs2_unlock_from_flush(sdp);
-		kfree(ai);
-		return;
-	}
-
 	sdp->sd_log_flush_head = sdp->sd_log_head;
 	sdp->sd_log_flush_wrapped = 0;
 	ai->ai_first = sdp->sd_log_flush_head;
@@ -538,7 +523,6 @@ void gfs2_log_flush_i(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
 		log_write_header(sdp, 0, PULL);
 	lops_after_commit(sdp, ai);
-
 	sdp->sd_log_head = sdp->sd_log_flush_head;
 	if (sdp->sd_log_flush_wrapped)
 		sdp->sd_log_wraps++;
@@ -554,7 +538,7 @@ void gfs2_log_flush_i(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 	}
 	gfs2_log_unlock(sdp);
 
-	up(&sdp->sd_log_flush_lock);
+	mutex_unlock(&sdp->sd_log_flush_lock);
 	sdp->sd_vfs->s_dirt = 0;
 	gfs2_unlock_from_flush(sdp);
 
@@ -627,7 +611,7 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 
 void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 {
-	down(&sdp->sd_log_flush_lock);
+	mutex_lock(&sdp->sd_log_flush_lock);
 
 	gfs2_assert_withdraw(sdp, !atomic_read(&sdp->sd_log_trans_count));
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
@@ -654,6 +638,6 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 		sdp->sd_log_wraps++;
 	sdp->sd_log_tail = sdp->sd_log_head;
 
-	up(&sdp->sd_log_flush_lock);
+	mutex_unlock(&sdp->sd_log_flush_lock);
 }
 
@@ -42,9 +42,6 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
 	sdp->sd_log_head = sdp->sd_log_tail = value;
 }
 
-void gfs2_lock_for_flush(struct gfs2_sbd *sdp);
-void gfs2_unlock_from_flush(struct gfs2_sbd *sdp);
-
 unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
 			     unsigned int ssize);
 
@@ -502,7 +502,8 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 	unsigned int total_dbuf = sdp->sd_log_num_databuf;
 	unsigned int total_jdata = sdp->sd_log_num_jdata;
 	unsigned int num, n;
-	__be64 *ptr;
+	__be64 *ptr = NULL;
+	unsigned i;
 
 	offset += (2*sizeof(__be64) - 1);
 	offset &= ~(2*sizeof(__be64) - 1);
@@ -513,14 +514,17 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 	 * Start writing ordered buffers, write journaled buffers
 	 * into the log along with a header
 	 */
-	gfs2_log_lock(sdp);
+	/* printk(KERN_INFO "locked in lops databuf_before_commit\n"); */
 	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf, bd_le.le_list);
 	while(total_dbuf) {
 		num = total_jdata;
 		if (num > limit)
 			num = limit;
+		/* printk(KERN_INFO "total_dbuf=%u num=%u\n", total_dbuf, num); */
 		n = 0;
+		i = 0;
 		list_for_each_entry_safe_continue(bd1, bdt, &sdp->sd_log_le_databuf, bd_le.le_list) {
+			gfs2_log_lock(sdp);
 			/* An ordered write buffer */
 			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
 				list_move(&bd1->bd_le.le_list, &started);
@@ -531,20 +535,28 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 				total_dbuf--;
 				if (bd1->bd_bh) {
 					get_bh(bd1->bd_bh);
-					gfs2_log_unlock(sdp);
 					if (buffer_dirty(bd1->bd_bh)) {
+						gfs2_log_unlock(sdp);
 						wait_on_buffer(bd1->bd_bh);
 						ll_rw_block(WRITE, 1, &bd1->bd_bh);
+						gfs2_log_lock(sdp);
 					}
 					brelse(bd1->bd_bh);
+					/* printk(KERN_INFO "db write %p\n", bd1); */
+					if (++i > 100000) {
+						printk(KERN_INFO "looping bd1=%p bdt=%p eol=%p started=%p\n", bd1, bdt, &sdp->sd_log_le_databuf, &started);
+						dump_stack();
+						BUG();
+					}
 					continue;
 				}
-				gfs2_log_unlock(sdp);
+				/* printk(KERN_INFO "db skip\n"); */
 				continue;
 			} else if (bd1->bd_bh) { /* A journaled buffer */
 				int magic;
-				gfs2_log_unlock(sdp);
 				/* printk(KERN_INFO "journaled buffer\n"); */
+				printk(KERN_INFO "journaled buffer %p\n", bd1->bd_bh);
+				printk(KERN_INFO "%lu %u %p %p\n", bd1->bd_bh->b_blocknr, bd1->bd_bh->b_size, bd1->bd_bh->b_data, bd1->bd_bh->b_page);
 				if (!bh) {
 					bh = gfs2_log_get_buf(sdp);
 					ld = (struct gfs2_log_descriptor *)bh->b_data;
@@ -558,16 +570,21 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 					ld->ld_data2 = cpu_to_be32(0);
 					memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
 				}
+				/* printk(KERN_INFO "check_magic\n"); */
 				magic = gfs2_check_magic(bd1->bd_bh);
+				/* printk(KERN_INFO "write data\n"); */
 				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
 				*ptr++ = cpu_to_be64((__u64)magic);
+				/* printk(KERN_INFO "mark escaped or not\n"); */
 				clear_buffer_escaped(bd1->bd_bh);
 				if (unlikely(magic != 0))
 					set_buffer_escaped(bd1->bd_bh);
+				gfs2_log_lock(sdp);
 				if (n++ > num)
 					break;
 			}
 		}
+		gfs2_log_unlock(sdp);
 		if (bh) {
 			set_buffer_dirty(bh);
 			ll_rw_block(WRITE, 1, &bh);
@@ -575,10 +592,12 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 		}
 		n = 0;
+		/* printk(KERN_INFO "totals2: jdata=%u dbuf=%u\n", total_jdata, total_dbuf); */
+		gfs2_log_lock(sdp);
 		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf, bd_le.le_list) {
 			if (!bd2->bd_bh)
 				continue;
 			/* copy buffer if it needs escaping */
 			gfs2_log_unlock(sdp);
 			if (unlikely(buffer_escaped(bd2->bd_bh))) {
 				void *kaddr;
 				struct page *page = bd2->bd_bh->b_page;
@@ -592,6 +611,7 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 			}
 			set_buffer_dirty(bh);
 			ll_rw_block(WRITE, 1, &bh);
+			gfs2_log_lock(sdp);
 			if (++n >= num)
 				break;
 		}
@@ -599,6 +619,8 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 		total_dbuf -= num;
 		total_jdata -= num;
 	}
+	gfs2_log_unlock(sdp);
 
+	/* printk(KERN_INFO "wait on ordered data buffers\n"); */
 	/* Wait on all ordered buffers */
 	while (!list_empty(&started)) {
@@ -701,12 +723,10 @@ static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 
 	while (!list_empty(head)) {
 		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
-		list_del_init(&bd->bd_le.le_list);
+		list_del(&bd->bd_le.le_list);
 		sdp->sd_log_num_databuf--;
 		sdp->sd_log_num_jdata--;
 		gfs2_unpin(sdp, bd->bd_bh, ai);
 		brelse(bd->bd_bh);
 		kfree(bd);
 	}
-	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
-	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);

@@ -556,7 +556,7 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh, int meta
 		return;
 	}
 
-	bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_KERNEL | __GFP_NOFAIL),
+	bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
 	atomic_inc(&gl->gl_sbd->sd_bufdata_count);
 
 	memset(bd, 0, sizeof(struct gfs2_bufdata));

@@ -189,7 +189,6 @@ static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
 		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
 		done_trans = 1;
 	}
-
 	error = block_write_full_page(page, get_block_noalloc, wbc);
 	if (done_trans)
 		gfs2_trans_end(sdp);

@@ -688,7 +688,7 @@ static int gfs2_open(struct inode *inode, struct file *file)
 	if (!fp)
 		return -ENOMEM;
 
-	init_MUTEX(&fp->f_fl_mutex);
+	mutex_init(&fp->f_fl_mutex);
 
 	fp->f_inode = ip;
 	fp->f_vfile = file;
@@ -858,7 +858,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
 	flags = ((IS_SETLKW(cmd)) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
 
-	down(&fp->f_fl_mutex);
+	mutex_lock(&fp->f_fl_mutex);
 
 	gl = fl_gh->gh_gl;
 	if (gl) {
@@ -890,7 +890,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 	}
 
  out:
-	up(&fp->f_fl_mutex);
+	mutex_unlock(&fp->f_fl_mutex);
 
 	return error;
 }
@@ -900,11 +900,11 @@ static void do_unflock(struct file *file, struct file_lock *fl)
 	struct gfs2_file *fp = get_v2fp(file);
 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
 
-	down(&fp->f_fl_mutex);
+	mutex_lock(&fp->f_fl_mutex);
 	flock_lock_file_wait(file, fl);
 	if (fl_gh->gh_gl)
 		gfs2_glock_dq_uninit(fl_gh);
-	up(&fp->f_fl_mutex);
+	mutex_unlock(&fp->f_fl_mutex);
 }
 
 /**

@@ -59,29 +59,29 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	INIT_LIST_HEAD(&sdp->sd_reclaim_list);
 	spin_lock_init(&sdp->sd_reclaim_lock);
 	init_waitqueue_head(&sdp->sd_reclaim_wq);
-	init_MUTEX(&sdp->sd_invalidate_inodes_mutex);
+	mutex_init(&sdp->sd_invalidate_inodes_mutex);
 
-	init_MUTEX(&sdp->sd_inum_mutex);
+	mutex_init(&sdp->sd_inum_mutex);
 	spin_lock_init(&sdp->sd_statfs_spin);
-	init_MUTEX(&sdp->sd_statfs_mutex);
+	mutex_init(&sdp->sd_statfs_mutex);
 
 	spin_lock_init(&sdp->sd_rindex_spin);
-	init_MUTEX(&sdp->sd_rindex_mutex);
+	mutex_init(&sdp->sd_rindex_mutex);
 	INIT_LIST_HEAD(&sdp->sd_rindex_list);
 	INIT_LIST_HEAD(&sdp->sd_rindex_mru_list);
 	INIT_LIST_HEAD(&sdp->sd_rindex_recent_list);
 
 	INIT_LIST_HEAD(&sdp->sd_jindex_list);
 	spin_lock_init(&sdp->sd_jindex_spin);
-	init_MUTEX(&sdp->sd_jindex_mutex);
+	mutex_init(&sdp->sd_jindex_mutex);
 
 	INIT_LIST_HEAD(&sdp->sd_unlinked_list);
 	spin_lock_init(&sdp->sd_unlinked_spin);
-	init_MUTEX(&sdp->sd_unlinked_mutex);
+	mutex_init(&sdp->sd_unlinked_mutex);
 
 	INIT_LIST_HEAD(&sdp->sd_quota_list);
 	spin_lock_init(&sdp->sd_quota_spin);
-	init_MUTEX(&sdp->sd_quota_mutex);
+	mutex_init(&sdp->sd_quota_mutex);
 
 	spin_lock_init(&sdp->sd_log_lock);
 	init_waitqueue_head(&sdp->sd_log_trans_wq);
@@ -99,12 +99,12 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	INIT_LIST_HEAD(&sdp->sd_ail1_list);
 	INIT_LIST_HEAD(&sdp->sd_ail2_list);
 
-	init_MUTEX(&sdp->sd_log_flush_lock);
+	mutex_init(&sdp->sd_log_flush_lock);
 	INIT_LIST_HEAD(&sdp->sd_log_flush_list);
 
 	INIT_LIST_HEAD(&sdp->sd_revoke_list);
 
-	init_MUTEX(&sdp->sd_freeze_lock);
+	mutex_init(&sdp->sd_freeze_lock);
 
 	return sdp;
 }

@@ -74,10 +74,10 @@ static void gfs2_put_super(struct super_block *sb)
 
 	/* Unfreeze the filesystem, if we need to */
 
-	down(&sdp->sd_freeze_lock);
+	mutex_lock(&sdp->sd_freeze_lock);
 	if (sdp->sd_freeze_count)
 		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
-	up(&sdp->sd_freeze_lock);
+	mutex_unlock(&sdp->sd_freeze_lock);
 
 	kthread_stop(sdp->sd_inoded_process);
 	kthread_stop(sdp->sd_quotad_process);

@@ -251,10 +251,10 @@ static int bh_get(struct gfs2_quota_data *qd)
 	struct buffer_head *bh;
 	int error;
 
-	down(&sdp->sd_quota_mutex);
+	mutex_lock(&sdp->sd_quota_mutex);
 
 	if (qd->qd_bh_count++) {
-		up(&sdp->sd_quota_mutex);
+		mutex_unlock(&sdp->sd_quota_mutex);
 		return 0;
 	}
 
@@ -276,7 +276,7 @@ static int bh_get(struct gfs2_quota_data *qd)
 		      (bh->b_data + sizeof(struct gfs2_meta_header) +
 		       offset * sizeof(struct gfs2_quota_change));
 
-	up(&sdp->sd_quota_mutex);
+	mutex_lock(&sdp->sd_quota_mutex);
 
 	return 0;
 
@@ -285,7 +285,7 @@ static int bh_get(struct gfs2_quota_data *qd)
 
  fail:
 	qd->qd_bh_count--;
-	up(&sdp->sd_quota_mutex);
+	mutex_unlock(&sdp->sd_quota_mutex);
 	return error;
 }
 
@@ -293,14 +293,14 @@ static void bh_put(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	down(&sdp->sd_quota_mutex);
+	mutex_lock(&sdp->sd_quota_mutex);
 	gfs2_assert(sdp, qd->qd_bh_count);
 	if (!--qd->qd_bh_count) {
 		brelse(qd->qd_bh);
 		qd->qd_bh = NULL;
 		qd->qd_bh_qc = NULL;
 	}
-	up(&sdp->sd_quota_mutex);
+	mutex_unlock(&sdp->sd_quota_mutex);
 }
 
 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
@@ -529,7 +529,7 @@ static void do_qc(struct gfs2_quota_data *qd, int64_t change)
 	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 	int64_t x;
 
-	down(&sdp->sd_quota_mutex);
+	mutex_lock(&sdp->sd_quota_mutex);
 	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);
 
 	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
@@ -560,7 +560,7 @@ static void do_qc(struct gfs2_quota_data *qd, int64_t change)
 		slot_hold(qd);
 	}
 
-	up(&sdp->sd_quota_mutex);
+	mutex_unlock(&sdp->sd_quota_mutex);
 }
 
 /**

@@ -182,9 +182,9 @@ static void clear_rgrpdi(struct gfs2_sbd *sdp)
 
 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 {
-	down(&sdp->sd_rindex_mutex);
+	mutex_lock(&sdp->sd_rindex_mutex);
 	clear_rgrpdi(sdp);
-	up(&sdp->sd_rindex_mutex);
+	mutex_unlock(&sdp->sd_rindex_mutex);
 }
 
 /**
@@ -301,7 +301,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
 		if (!rgd)
 			goto fail;
 
-		init_MUTEX(&rgd->rd_mutex);
+		mutex_init(&rgd->rd_mutex);
 		lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
 		rgd->rd_sbd = sdp;
 
@@ -363,13 +363,13 @@ int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
 
 	/* Read new copy from disk if we don't have the latest */
 	if (sdp->sd_rindex_vn != gl->gl_vn) {
-		down(&sdp->sd_rindex_mutex);
+		mutex_lock(&sdp->sd_rindex_mutex);
 		if (sdp->sd_rindex_vn != gl->gl_vn) {
 			error = gfs2_ri_update(ip);
 			if (error)
 				gfs2_glock_dq_uninit(ri_gh);
 		}
-		up(&sdp->sd_rindex_mutex);
+		mutex_unlock(&sdp->sd_rindex_mutex);
 	}
 
 	return error;
@@ -394,13 +394,13 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
 	unsigned int x, y;
 	int error;
 
-	down(&rgd->rd_mutex);
+	mutex_lock(&rgd->rd_mutex);
 
 	spin_lock(&sdp->sd_rindex_spin);
 	if (rgd->rd_bh_count) {
 		rgd->rd_bh_count++;
 		spin_unlock(&sdp->sd_rindex_spin);
-		up(&rgd->rd_mutex);
+		mutex_unlock(&rgd->rd_mutex);
 		return 0;
 	}
 	spin_unlock(&sdp->sd_rindex_spin);
@@ -436,7 +436,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
 	rgd->rd_bh_count++;
 	spin_unlock(&sdp->sd_rindex_spin);
 
-	up(&rgd->rd_mutex);
+	mutex_unlock(&rgd->rd_mutex);
 
 	return 0;
 
@@ -447,7 +447,7 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
 		bi->bi_bh = NULL;
 		gfs2_assert_warn(sdp, !bi->bi_clone);
 	}
-	up(&rgd->rd_mutex);
+	mutex_unlock(&rgd->rd_mutex);
 
 	return error;
 }

@@ -279,7 +279,7 @@ int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
 
 	name.name = buf;
 
-	down(&sdp->sd_jindex_mutex);
+	mutex_lock(&sdp->sd_jindex_mutex);
 
 	for (;;) {
 		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED,
@@ -317,7 +317,7 @@ int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
 		spin_unlock(&sdp->sd_jindex_spin);
 	}
 
-	up(&sdp->sd_jindex_mutex);
+	mutex_unlock(&sdp->sd_jindex_mutex);
 
 	return error;
 }
@@ -608,9 +608,9 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, int64_t total, int64_t free,
 	if (error)
 		return;
 
-	down(&sdp->sd_statfs_mutex);
+	mutex_lock(&sdp->sd_statfs_mutex);
 	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
-	up(&sdp->sd_statfs_mutex);
+	mutex_unlock(&sdp->sd_statfs_mutex);
 
 	spin_lock(&sdp->sd_statfs_spin);
 	l_sc->sc_total += total;
@@ -659,9 +659,9 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
 	if (error)
 		goto out_bh2;
 
-	down(&sdp->sd_statfs_mutex);
+	mutex_lock(&sdp->sd_statfs_mutex);
 	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
-	up(&sdp->sd_statfs_mutex);
+	mutex_unlock(&sdp->sd_statfs_mutex);
 
 	spin_lock(&sdp->sd_statfs_spin);
 	m_sc->sc_total += l_sc->sc_total;
@@ -910,7 +910,7 @@ int gfs2_freeze_fs(struct gfs2_sbd *sdp)
 {
 	int error = 0;
 
-	down(&sdp->sd_freeze_lock);
+	mutex_lock(&sdp->sd_freeze_lock);
 
 	if (!sdp->sd_freeze_count++) {
 		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
@@ -918,7 +918,7 @@ int gfs2_freeze_fs(struct gfs2_sbd *sdp)
 			sdp->sd_freeze_count--;
 	}
 
-	up(&sdp->sd_freeze_lock);
+	mutex_unlock(&sdp->sd_freeze_lock);
 
 	return error;
 }
@@ -935,11 +935,11 @@ int gfs2_freeze_fs(struct gfs2_sbd *sdp)
 
 void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
 {
-	down(&sdp->sd_freeze_lock);
+	mutex_lock(&sdp->sd_freeze_lock);
 
 	if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
 		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
 
-	up(&sdp->sd_freeze_lock);
+	mutex_unlock(&sdp->sd_freeze_lock);
 }
 
@@ -41,9 +41,9 @@ static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
 {
 	unsigned int count;
 
-	down(&sdp->sd_freeze_lock);
+	mutex_lock(&sdp->sd_freeze_lock);
 	count = sdp->sd_freeze_count;
-	up(&sdp->sd_freeze_lock);
+	mutex_unlock(&sdp->sd_freeze_lock);
 
 	return sprintf(buf, "%u\n", count);
 }

@@ -33,7 +33,7 @@ int gfs2_trans_begin_i(struct gfs2_sbd *sdp, unsigned int blocks,
 		return -EINVAL;
 	}
 
-	tr = kzalloc(sizeof(struct gfs2_trans), GFP_KERNEL);
+	tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS);
 	if (!tr)
 		return -ENOMEM;
 
@@ -51,7 +51,7 @@ int gfs2_trans_begin_i(struct gfs2_sbd *sdp, unsigned int blocks,
 
 	error = -ENOMEM;
 	tr->tr_t_gh = gfs2_holder_get(sdp->sd_trans_gl, LM_ST_SHARED,
-				      GL_NEVER_RECURSE, GFP_KERNEL);
+				      GL_NEVER_RECURSE, GFP_NOFS);
 	if (!tr->tr_t_gh)
 		goto fail;
 
@@ -46,12 +46,12 @@ static int munge_ondisk(struct gfs2_sbd *sdp, unsigned int slot,
 		goto out;
 	}
 
-	down(&sdp->sd_unlinked_mutex);
+	mutex_lock(&sdp->sd_unlinked_mutex);
 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
 	gfs2_unlinked_tag_out(ut, bh->b_data +
 			      sizeof(struct gfs2_meta_header) +
 			      offset * sizeof(struct gfs2_unlinked_tag));
-	up(&sdp->sd_unlinked_mutex);
+	mutex_unlock(&sdp->sd_unlinked_mutex);
 
  out:
 	brelse(bh);