Changes in gfs2:

* Log space and revoke accounting rework to fix some failed asserts.
* Local resource group glock sharing for better local performance.
* Add support for version 1802 filesystems: trusted xattr support and
  '-o rgrplvb' mounts by default.
* Actually synchronize on the inode glock's FREEING bit during withdraw
  ("gfs2: fix glock confusion in function signal_our_withdraw").
* Fix parallel recovery of multiple journals ("gfs2: keep bios separate
  for each journal").
* Various other bug fixes.
-----BEGIN PGP SIGNATURE-----

iQJIBAABCAAyFiEEJZs3krPW0xkhLMTc1b+f6wMTZToFAmA1TmwUHGFncnVlbmJh
QHJlZGhhdC5jb20ACgkQ1b+f6wMTZTpDZhAArnFj5AhWMI2+DD5o05EILdgDSpwh
JWYT1pfRqR1OZrs7ZZ7tGZB4H6oytYfJ+4mg9Kk7CE7oJKcBh695IPZoIWv8+BCC
WIgQGJytCFp4tuDNw11HZ0ahgW4zXPyJTt6jidZ5jVkux31JrUS7fVqSsD2vIPqA
iQMcJIH+NLTlYbNt4d5T/ngaoRcx7m18RWkcxf6Y+/DBnnwIe4ZDpZmkWVykuncv
OFSvXK8vKyLWGnvH/MIsywfYeU5rj/0AIu66JhVILQ4v5kGYIigwY3quXP2SoITM
Z0+N5Gj/N4OWSscRS86zyqhnRucrjDkNP2+oGSzJWgtSXE/KplyfInAmQWzhIPRM
n7T0boTp+gOTzGq7ELCzj44KICLG76WgDwaR2bLHuQ2/ppVrHNltZqncP2iwynN6
glfST/eHBUBu1qTYLaOAfkUBlhpKDXu0YPcXX7lH6M0JqyvkRUFfuBAU9dic9D9K
zsxplHGJrZnE9QFWWbS3aOviPlSHaXfkZF0Xv7QCLyuPRhu+e/qfcAoeVhxSd4+e
I0grs/TxM61jyju9SmqnM7P+8qYS55naYH1V+6iNCU5dax8MvdxNZuneBQIa07U+
Y84JPQvTBZDUE0gZ8fUzZtnYS7RqyiG7BL+T4W5Ph7LgxXbgQD7CWerYpg7fBm/j
HEpjKqrS96zfTyk=
=45VG
-----END PGP SIGNATURE-----

Merge tag 'gfs2-for-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Log space and revoke accounting rework to fix some failed asserts.

 - Local resource group glock sharing for better local performance.

 - Add support for version 1802 filesystems: trusted xattr support and
   '-o rgrplvb' mounts by default.

 - Actually synchronize on the inode glock's FREEING bit during withdraw
   ("gfs2: fix glock confusion in function signal_our_withdraw").

 - Fix parallel recovery of multiple journals ("gfs2: keep bios separate
   for each journal").

 - Various other bug fixes.

* tag 'gfs2-for-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (49 commits)
  gfs2: Don't get stuck with I/O plugged in gfs2_ail1_flush
  gfs2: Per-revoke accounting in transactions
  gfs2: Rework the log space allocation logic
  gfs2: Minor calc_reserved cleanup
  gfs2: Use resource group glock sharing
  gfs2: Allow node-wide exclusive glock sharing
  gfs2: Add local resource group locking
  gfs2: Add per-reservation reserved block accounting
  gfs2: Rename rs_{free -> requested} and rd_{reserved -> requested}
  gfs2: Check for active reservation in gfs2_release
  gfs2: Don't search for unreserved space twice
  gfs2: Only pass reservation down to gfs2_rbm_find
  gfs2: Also reflect single-block allocations in rgd->rd_extfail_pt
  gfs2: Recursive gfs2_quota_hold in gfs2_iomap_end
  gfs2: Add trusted xattr support
  gfs2: Enable rgrplvb for sb_fs_format 1802
  gfs2: Don't skip dlm unlock if glock has an lvb
  gfs2: Lock imbalance on error path in gfs2_recover_one
  gfs2: Move function gfs2_ail_empty_tr
  gfs2: Get rid of current_tail()
  ...
This commit is contained in:

commit f6e1e1d1e1
fs/gfs2/bmap.c

@@ -1230,6 +1230,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 	gfs2_inplace_release(ip);
 
+	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+		gfs2_quota_unlock(ip);
+
 	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
 		/* Deallocate blocks that were just allocated. */
 		loff_t blockmask = i_blocksize(inode) - 1;
@@ -1242,9 +1245,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 		}
 	}
 
-	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
-		gfs2_quota_unlock(ip);
-
 	if (unlikely(!written))
 		goto out_unlock;
 
@@ -1538,13 +1538,13 @@ more_rgrps:
 			goto out;
 		}
 		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
-					 0, rd_gh);
+					 LM_FLAG_NODE_SCOPE, rd_gh);
 		if (ret)
 			goto out;
 
 		/* Must be done with the rgrp glock held: */
 		if (gfs2_rs_active(&ip->i_res) &&
-		    rgd == ip->i_res.rs_rbm.rgd)
+		    rgd == ip->i_res.rs_rgd)
 			gfs2_rs_deltree(&ip->i_res);
 	}
fs/gfs2/file.c

@@ -716,10 +716,10 @@ static int gfs2_release(struct inode *inode, struct file *file)
 	kfree(file->private_data);
 	file->private_data = NULL;
 
-	if (gfs2_rs_active(&ip->i_res))
-		gfs2_rs_delete(ip, &inode->i_writecount);
-	if (file->f_mode & FMODE_WRITE)
+	if (file->f_mode & FMODE_WRITE) {
+		if (gfs2_rs_active(&ip->i_res))
+			gfs2_rs_delete(ip, &inode->i_writecount);
 		gfs2_qa_put(ip);
+	}
 	return 0;
 }
 
@@ -1112,8 +1112,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 			goto out_qunlock;
 
 		/* check if the selected rgrp limits our max_blks further */
-		if (ap.allowed && ap.allowed < max_blks)
-			max_blks = ap.allowed;
+		if (ip->i_res.rs_reserved < max_blks)
+			max_blks = ip->i_res.rs_reserved;
 
 		/* Almost done. Calculate bytes that can be written using
 		 * max_blks. We also recompute max_bytes, data_blocks and
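Context for the __gfs2_fallocate() hunk above: with per-reservation block accounting, each fallocate round is bounded by the blocks actually reserved for the inode rather than by a per-rgrp heuristic. A minimal standalone sketch of the new clamping rule (the function name and parameters here are hypothetical, not kernel API):

	/* Sketch: cap a fallocate round at the blocks the allocator actually
	 * reserved for this inode (rs_reserved), instead of the old heuristic
	 * based on what the selected resource group would allow. */
	static unsigned int clamp_fallocate_blocks(unsigned int max_blks,
						   unsigned int rs_reserved)
	{
		if (rs_reserved < max_blks)
			max_blks = rs_reserved;
		return max_blks;
	}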
fs/gfs2/glock.c

@@ -313,9 +313,23 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
 {
 	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
-	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
-	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
-		return 0;
+
+	if (gh != gh_head) {
+		/**
+		 * Here we make a special exception to grant holders who agree
+		 * to share the EX lock with other holders who also have the
+		 * bit set. If the original holder has the LM_FLAG_NODE_SCOPE bit
+		 * is set, we grant more holders with the bit set.
+		 */
+		if (gh_head->gh_state == LM_ST_EXCLUSIVE &&
+		    (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) &&
+		    gh->gh_state == LM_ST_EXCLUSIVE &&
+		    (gh->gh_flags & LM_FLAG_NODE_SCOPE))
+			return 1;
+		if ((gh->gh_state == LM_ST_EXCLUSIVE ||
+		     gh_head->gh_state == LM_ST_EXCLUSIVE))
+			return 0;
+	}
 	if (gl->gl_state == gh->gh_state)
 		return 1;
 	if (gh->gh_flags & GL_EXACT)
@@ -2030,6 +2044,8 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
 		*p++ = 'A';
 	if (flags & LM_FLAG_PRIORITY)
 		*p++ = 'p';
+	if (flags & LM_FLAG_NODE_SCOPE)
+		*p++ = 'n';
 	if (flags & GL_ASYNC)
 		*p++ = 'a';
 	if (flags & GL_EXACT)
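For readers not steeped in glock internals, the new branch in may_grant() reduces to a simple rule: two exclusive holders may coexist on one node only when both opted into node-wide sharing. A standalone sketch of that predicate (simplified; real gfs2_holder objects carry more state, and the constants are copied from glock.h above):

	#include <stdbool.h>

	#define LM_ST_EXCLUSIVE    1
	#define LM_FLAG_NODE_SCOPE 0x0020

	struct holder { int state; unsigned int flags; };

	/* Sketch: an EX holder may share with the current head holder only
	 * when both carry LM_FLAG_NODE_SCOPE; any other pairing involving
	 * an EX holder is refused. */
	static bool may_share_ex(const struct holder *head, const struct holder *gh)
	{
		if (head->state == LM_ST_EXCLUSIVE && gh->state == LM_ST_EXCLUSIVE &&
		    (head->flags & LM_FLAG_NODE_SCOPE) &&
		    (gh->flags & LM_FLAG_NODE_SCOPE))
			return true;	/* both agreed to node-local sharing */
		return !(head->state == LM_ST_EXCLUSIVE ||
			 gh->state == LM_ST_EXCLUSIVE);	/* otherwise EX excludes */
	}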
fs/gfs2/glock.h

@@ -75,6 +75,11 @@ enum {
  * request and directly join the other shared lock.  A shared lock request
  * without the priority flag might be forced to wait until the deferred
  * requested had acquired and released the lock.
+ *
+ * LM_FLAG_NODE_SCOPE
+ * This holder agrees to share the lock within this node. In other words,
+ * the glock is held in EX mode according to DLM, but local holders on the
+ * same node can share it.
  */
 
 #define LM_FLAG_TRY		0x0001
@@ -82,6 +87,7 @@ enum {
 #define LM_FLAG_NOEXP		0x0004
 #define LM_FLAG_ANY		0x0008
 #define LM_FLAG_PRIORITY	0x0010
+#define LM_FLAG_NODE_SCOPE	0x0020
 #define GL_ASYNC		0x0040
 #define GL_EXACT		0x0080
 #define GL_SKIP			0x0100
fs/gfs2/glops.c

@@ -86,16 +86,12 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_trans tr;
+	unsigned int revokes;
 	int ret;
 
-	memset(&tr, 0, sizeof(tr));
-	INIT_LIST_HEAD(&tr.tr_buf);
-	INIT_LIST_HEAD(&tr.tr_databuf);
-	INIT_LIST_HEAD(&tr.tr_ail1_list);
-	INIT_LIST_HEAD(&tr.tr_ail2_list);
-	tr.tr_revokes = atomic_read(&gl->gl_ail_count);
+	revokes = atomic_read(&gl->gl_ail_count);
 
-	if (!tr.tr_revokes) {
+	if (!revokes) {
 		bool have_revokes;
 		bool log_in_flight;
 
@@ -122,20 +118,14 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
 		return 0;
 	}
 
-	/* A shortened, inline version of gfs2_trans_begin()
-	 * tr->alloced is not set since the transaction structure is
-	 * on the stack */
-	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
-	tr.tr_ip = _RET_IP_;
-	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
-	if (ret < 0)
-		return ret;
-	WARN_ON_ONCE(current->journal_info);
-	current->journal_info = &tr;
-
-	__gfs2_ail_flush(gl, 0, tr.tr_revokes);
-
+	memset(&tr, 0, sizeof(tr));
+	set_bit(TR_ONSTACK, &tr.tr_flags);
+	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
+	if (ret)
+		goto flush;
+	__gfs2_ail_flush(gl, 0, revokes);
+	gfs2_trans_end(sdp);
+
+flush:
 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_AIL_EMPTY_GL);
@@ -146,19 +136,15 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	unsigned int revokes = atomic_read(&gl->gl_ail_count);
-	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
 	int ret;
 
 	if (!revokes)
 		return;
 
-	while (revokes > max_revokes)
-		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
-
-	ret = gfs2_trans_begin(sdp, 0, max_revokes);
+	ret = gfs2_trans_begin(sdp, 0, revokes);
 	if (ret)
 		return;
-	__gfs2_ail_flush(gl, fsync, max_revokes);
+	__gfs2_ail_flush(gl, fsync, revokes);
 	gfs2_trans_end(sdp);
 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 		       GFS2_LFC_AIL_FLUSH);
fs/gfs2/incore.h

@@ -20,6 +20,7 @@
 #include <linux/percpu.h>
 #include <linux/lockref.h>
 #include <linux/rhashtable.h>
+#include <linux/mutex.h>
 
 #define DIO_WAIT	0x00000010
 #define DIO_METADATA	0x00000020
@@ -106,7 +107,8 @@ struct gfs2_rgrpd {
 	u32 rd_data;			/* num of data blocks in rgrp */
 	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
 	u32 rd_free;
-	u32 rd_reserved;		/* number of blocks reserved */
+	u32 rd_requested;		/* number of blocks in rd_rstree */
+	u32 rd_reserved;		/* number of reserved blocks */
 	u32 rd_free_clone;
 	u32 rd_dinodes;
 	u64 rd_igeneration;
@@ -122,34 +124,10 @@ struct gfs2_rgrpd {
 #define GFS2_RDF_PREFERRED	0x80000000 /* This rgrp is preferred */
 #define GFS2_RDF_MASK		0xf0000000 /* mask for internal flags */
 	spinlock_t rd_rsspin;		/* protects reservation related vars */
+	struct mutex rd_mutex;
 	struct rb_root rd_rstree;	/* multi-block reservation tree */
 };
 
-struct gfs2_rbm {
-	struct gfs2_rgrpd *rgd;
-	u32 offset;		/* The offset is bitmap relative */
-	int bii;		/* Bitmap index */
-};
-
-static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
-{
-	return rbm->rgd->rd_bits + rbm->bii;
-}
-
-static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
-{
-	BUG_ON(rbm->offset >= rbm->rgd->rd_data);
-	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
-	       rbm->offset;
-}
-
-static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
-			       const struct gfs2_rbm *rbm2)
-{
-	return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
-	       (rbm1->offset == rbm2->offset);
-}
-
 enum gfs2_state_bits {
 	BH_Pinned		= BH_PrivateStart,
 	BH_Escaped		= BH_PrivateStart + 1,
@@ -313,9 +291,11 @@ struct gfs2_qadata { /* quota allocation data */
  */
 
 struct gfs2_blkreserv {
-	struct rb_node rs_node;		/* link to other block reservations */
-	struct gfs2_rbm rs_rbm;		/* Start of reservation */
-	u32 rs_free;			/* how many blocks are still free */
+	struct rb_node rs_node;		/* node within rd_rstree */
+	struct gfs2_rgrpd *rs_rgd;
+	u64 rs_start;
+	u32 rs_requested;
+	u32 rs_reserved;		/* number of reserved blocks */
 };
 
 /*
@@ -490,7 +470,7 @@ struct gfs2_quota_data {
 enum {
 	TR_TOUCHED = 1,
 	TR_ATTACHED = 2,
-	TR_ALLOCED = 3,
+	TR_ONSTACK = 3,
 };
 
 struct gfs2_trans {
@@ -506,7 +486,6 @@ struct gfs2_trans {
 	unsigned int tr_num_buf_rm;
 	unsigned int tr_num_databuf_rm;
 	unsigned int tr_num_revoke;
-	unsigned int tr_num_revoke_rm;
 
 	struct list_head tr_list;
 	struct list_head tr_databuf;
@@ -531,6 +510,7 @@ struct gfs2_jdesc {
 	unsigned int nr_extents;
 	struct work_struct jd_work;
 	struct inode *jd_inode;
+	struct bio *jd_log_bio;
 	unsigned long jd_flags;
 #define JDF_RECOVERY 1
 	unsigned int jd_jid;
@@ -585,6 +565,7 @@ struct gfs2_args {
 	unsigned int ar_errors:2;		/* errors=withdraw | panic */
 	unsigned int ar_nobarrier:1;		/* do not send barriers */
 	unsigned int ar_rgrplvb:1;		/* use lvbs for rgrp info */
+	unsigned int ar_got_rgrplvb:1;		/* Was the rgrplvb opt given? */
 	unsigned int ar_loccookie:1;		/* use location based readdir
						   cookies */
 	s32 ar_commit;				/* Commit interval */
@@ -821,7 +802,6 @@ struct gfs2_sbd {
 
 	struct gfs2_trans *sd_log_tr;
 	unsigned int sd_log_blks_reserved;
-	int sd_log_committed_revoke;
 
 	atomic_t sd_log_pinned;
 	unsigned int sd_log_num_revoke;
@@ -834,24 +814,22 @@ struct gfs2_sbd {
 	atomic_t sd_log_thresh2;
 	atomic_t sd_log_blks_free;
 	atomic_t sd_log_blks_needed;
+	atomic_t sd_log_revokes_available;
 	wait_queue_head_t sd_log_waitq;
 	wait_queue_head_t sd_logd_waitq;
 
 	u64 sd_log_sequence;
-	unsigned int sd_log_head;
-	unsigned int sd_log_tail;
 	int sd_log_idle;
 
 	struct rw_semaphore sd_log_flush_lock;
 	atomic_t sd_log_in_flight;
-	struct bio *sd_log_bio;
 	wait_queue_head_t sd_log_flush_wait;
 	int sd_log_error; /* First log error */
 	wait_queue_head_t sd_withdraw_wait;
 
-	atomic_t sd_reserving_log;
-	wait_queue_head_t sd_reserving_log_wait;
-
+	unsigned int sd_log_tail;
+	unsigned int sd_log_flush_tail;
+	unsigned int sd_log_head;
 	unsigned int sd_log_flush_head;
 
 	spinlock_t sd_ail_lock;
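For reference, the struct gfs2_rbm helpers deleted above implemented the bitmap-position-to-block-number mapping (they move out of incore.h rather than disappear from the filesystem). A minimal self-contained restatement of that arithmetic, with GFS2_NBBY being the kernel's four-blocks-per-bitmap-byte constant:

	#include <stdint.h>

	#define GFS2_NBBY 4	/* blocks encoded per bitmap byte (2 bits each) */

	/* Sketch of the removed gfs2_rbm_to_block(): a resource group's data
	 * starts at rd_data0; bi_start is the byte offset of this bitmap
	 * buffer within the rgrp's bitmaps, and offset is the bit position
	 * (in blocks) within that bitmap. */
	static inline uint64_t rbm_to_block(uint64_t rd_data0, uint32_t bi_start,
					    uint32_t offset)
	{
		return rd_data0 + ((uint64_t)bi_start * GFS2_NBBY) + offset;
	}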
fs/gfs2/inode.c

@@ -1147,7 +1147,7 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
 	if (!rgd)
 		goto out_inodes;
 
-	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, ghs + 2);
+	gfs2_holder_init(rgd->rd_gl, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE, ghs + 2);
 
 
 	error = gfs2_glock_nq(ghs); /* parent */
@@ -1453,8 +1453,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 			error = -ENOENT;
 			goto out_gunlock;
 		}
-		error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE, 0,
-					   &rd_gh);
+		error = gfs2_glock_nq_init(nrgd->rd_gl, LM_ST_EXCLUSIVE,
+					   LM_FLAG_NODE_SCOPE, &rd_gh);
 		if (error)
 			goto out_gunlock;
 	}
fs/gfs2/lock_dlm.c

@@ -284,7 +284,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
-	int lvb_needs_unlock = 0;
 	int error;
 
 	if (gl->gl_lksb.sb_lkid == 0) {
@@ -297,13 +296,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
 	gfs2_update_request_times(gl);
 
-	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
-
-	if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
-		lvb_needs_unlock = 1;
+	/* don't want to skip dlm_unlock writing the lvb when lock has one */
 
 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-	    !lvb_needs_unlock) {
+	    !gl->gl_lksb.sb_lvbptr) {
 		gfs2_glock_free(gl);
 		return;
 	}
fs/gfs2/log.c

@@ -50,10 +50,12 @@ unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
 	unsigned int blks;
 	unsigned int first, second;
 
+	/* The initial struct gfs2_log_descriptor block */
 	blks = 1;
 	first = sdp->sd_ldptrs;
 
 	if (nstruct > first) {
+		/* Subsequent struct gfs2_meta_header blocks */
 		second = sdp->sd_inptrs;
 		blks += DIV_ROUND_UP(nstruct - first, second);
 	}
@@ -89,7 +91,7 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
 
 static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
-			       struct gfs2_trans *tr)
+			       struct gfs2_trans *tr, struct blk_plug *plug)
 __releases(&sdp->sd_ail_lock)
 __acquires(&sdp->sd_ail_lock)
 {
@@ -131,6 +133,11 @@ __acquires(&sdp->sd_ail_lock)
 			continue;
 		spin_unlock(&sdp->sd_ail_lock);
 		ret = generic_writepages(mapping, wbc);
+		if (need_resched()) {
+			blk_finish_plug(plug);
+			cond_resched();
+			blk_start_plug(plug);
+		}
 		spin_lock(&sdp->sd_ail_lock);
 		if (ret == -ENODATA) /* if a jdata write into a new hole */
 			ret = 0; /* ignore it */
@@ -205,7 +212,7 @@ restart:
 	list_for_each_entry_reverse(tr, head, tr_list) {
 		if (wbc->nr_to_write <= 0)
 			break;
-		ret = gfs2_ail1_start_one(sdp, wbc, tr);
+		ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
 		if (ret) {
 			if (ret == -EBUSY)
 				goto restart;
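A worked example of the gfs2_struct2blk() arithmetic annotated above. The per-block capacities are illustrative assumptions; the real sd_ldptrs and sd_inptrs are derived from the filesystem block size at mount time:

	#include <stdio.h>

	/* Worked example: one gfs2_log_descriptor block holds 'first' revokes,
	 * each further gfs2_meta_header continuation block holds 'second'.
	 * The 503/509 figures are assumed values for a 4 KiB block size. */
	int main(void)
	{
		unsigned int first = 503;	/* assumed: revokes in the descriptor block */
		unsigned int second = 509;	/* assumed: revokes per continuation block */
		unsigned int nstruct = 1000;
		unsigned int blks = 1;		/* the initial descriptor block */

		if (nstruct > first)
			blks += (nstruct - first + second - 1) / second;
		printf("%u revokes -> %u log blocks\n", nstruct, blks); /* 1000 -> 2 */
		return 0;
	}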
@@ -240,6 +247,45 @@ static void gfs2_ail1_start(struct gfs2_sbd *sdp)
 	return gfs2_ail1_flush(sdp, &wbc);
 }
 
+static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
+{
+	unsigned int new_flush_tail = sdp->sd_log_head;
+	struct gfs2_trans *tr;
+
+	if (!list_empty(&sdp->sd_ail1_list)) {
+		tr = list_last_entry(&sdp->sd_ail1_list,
+				     struct gfs2_trans, tr_list);
+		new_flush_tail = tr->tr_first;
+	}
+	sdp->sd_log_flush_tail = new_flush_tail;
+}
+
+static void gfs2_log_update_head(struct gfs2_sbd *sdp)
+{
+	unsigned int new_head = sdp->sd_log_flush_head;
+
+	if (sdp->sd_log_flush_tail == sdp->sd_log_head)
+		sdp->sd_log_flush_tail = new_head;
+	sdp->sd_log_head = new_head;
+}
+
+/**
+ * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
+ */
+
+static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+			      struct list_head *head)
+{
+	struct gfs2_bufdata *bd;
+
+	while (!list_empty(head)) {
+		bd = list_first_entry(head, struct gfs2_bufdata,
+				      bd_ail_st_list);
+		gfs2_assert(sdp, bd->bd_tr == tr);
+		gfs2_remove_from_ail(bd);
+	}
+}
+
 /**
  * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
  * @sdp: the filesystem
@@ -315,6 +361,7 @@ static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
 		else
 			oldest_tr = 0;
 	}
+	gfs2_log_update_flush_tail(sdp);
 	ret = list_empty(&sdp->sd_ail1_list);
 	spin_unlock(&sdp->sd_ail_lock);
 
@@ -348,49 +395,71 @@ static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
 	spin_unlock(&sdp->sd_ail_lock);
 }
 
-/**
- * gfs2_ail_empty_tr - empty one of the ail lists for a transaction
- */
-
-static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
-			      struct list_head *head)
+static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
-	struct gfs2_bufdata *bd;
-
-	while (!list_empty(head)) {
-		bd = list_first_entry(head, struct gfs2_bufdata,
-				      bd_ail_st_list);
-		gfs2_assert(sdp, bd->bd_tr == tr);
-		gfs2_remove_from_ail(bd);
-	}
+	gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
+	list_del(&tr->tr_list);
+	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
+	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
+	gfs2_trans_free(sdp, tr);
 }
 
 static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
 {
-	struct gfs2_trans *tr, *safe;
+	struct list_head *ail2_list = &sdp->sd_ail2_list;
 	unsigned int old_tail = sdp->sd_log_tail;
-	int wrap = (new_tail < old_tail);
-	int a, b, rm;
+	struct gfs2_trans *tr, *safe;
 
 	spin_lock(&sdp->sd_ail_lock);
-
-	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
-		a = (old_tail <= tr->tr_first);
-		b = (tr->tr_first < new_tail);
-		rm = (wrap) ? (a || b) : (a && b);
-		if (!rm)
-			continue;
-
-		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
-		list_del(&tr->tr_list);
-		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
-		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
-		gfs2_trans_free(sdp, tr);
+	if (old_tail <= new_tail) {
+		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
+			if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
+				__ail2_empty(sdp, tr);
+		}
+	} else {
+		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
+			if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
+				__ail2_empty(sdp, tr);
+		}
 	}
-
 	spin_unlock(&sdp->sd_ail_lock);
 }
 
 /**
+ * gfs2_log_is_empty - Check if the log is empty
+ * @sdp: The GFS2 superblock
+ */
+
+bool gfs2_log_is_empty(struct gfs2_sbd *sdp) {
+	return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
+}
+
+static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
+{
+	unsigned int available;
+
+	available = atomic_read(&sdp->sd_log_revokes_available);
+	while (available >= revokes) {
+		if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
+				       &available, available - revokes))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * gfs2_log_release_revokes - Release a given number of revokes
+ * @sdp: The GFS2 superblock
+ * @revokes: The number of revokes to release
+ *
+ * sdp->sd_log_flush_lock must be held.
+ */
+void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
+{
+	if (revokes)
+		atomic_add(revokes, &sdp->sd_log_revokes_available);
+}
+
+/**
  * gfs2_log_release - Release a given number of log blocks
  * @sdp: The GFS2 superblock
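__gfs2_log_try_reserve_revokes() above is an instance of a common lockless pattern: optimistically claim part of a shared counter with a compare-and-swap loop that never blocks. A generic sketch of the same pattern using C11 atomics (the kernel code uses atomic_try_cmpxchg(), which likewise refreshes the expected value on failure):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Sketch: claim 'want' units from '*avail' without locking; fail
	 * instead of blocking when not enough is left. On CAS failure,
	 * 'have' is refreshed with the current value and the bound is
	 * re-checked before retrying. */
	static bool try_reserve(atomic_uint *avail, unsigned int want)
	{
		unsigned int have = atomic_load(avail);

		while (have >= want) {
			if (atomic_compare_exchange_weak(avail, &have, have - want))
				return true;
		}
		return false;
	}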
@@ -400,86 +469,141 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
 
 void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
 {
-
 	atomic_add(blks, &sdp->sd_log_blks_free);
 	trace_gfs2_log_blocks(sdp, blks);
 	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
 				  sdp->sd_jdesc->jd_blocks);
-	up_read(&sdp->sd_log_flush_lock);
+	if (atomic_read(&sdp->sd_log_blks_needed))
+		wake_up(&sdp->sd_log_waitq);
 }
 
 /**
- * gfs2_log_reserve - Make a log reservation
+ * __gfs2_log_try_reserve - Try to make a log reservation
  * @sdp: The GFS2 superblock
  * @blks: The number of blocks to reserve
+ * @taboo_blks: The number of blocks to leave free
  *
- * Note that we never give out the last few blocks of the journal. Thats
- * due to the fact that there is a small number of header blocks
- * associated with each log flush. The exact number can't be known until
- * flush time, so we ensure that we have just enough free blocks at all
- * times to avoid running out during a log flush.
+ * Try to do the same as __gfs2_log_reserve(), but fail if no more log
+ * space is immediately available.
+ */
+static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
+				   unsigned int taboo_blks)
+{
+	unsigned wanted = blks + taboo_blks;
+	unsigned int free_blocks;
+
+	free_blocks = atomic_read(&sdp->sd_log_blks_free);
+	while (free_blocks >= wanted) {
+		if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
+				       free_blocks - blks)) {
+			trace_gfs2_log_blocks(sdp, -blks);
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * __gfs2_log_reserve - Make a log reservation
+ * @sdp: The GFS2 superblock
+ * @blks: The number of blocks to reserve
+ * @taboo_blks: The number of blocks to leave free
+ *
+ * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS
+ * for all other processes.  This ensures that when the log is almost full,
+ * logd will still be able to call gfs2_log_flush one more time without
+ * blocking, which will advance the tail and make some more log space
+ * available.
  *
  * We no longer flush the log here, instead we wake up logd to do that
  * for us. To avoid the thundering herd and to ensure that we deal fairly
  * with queued waiters, we use an exclusive wait. This means that when we
  * get woken with enough journal space to get our reservation, we need to
  * wake the next waiter on the list.
- *
- * Returns: errno
 */
 
-int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
+static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
+			       unsigned int taboo_blks)
 {
-	int ret = 0;
-	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
-	unsigned wanted = blks + reserved_blks;
-	DEFINE_WAIT(wait);
-	int did_wait = 0;
+	unsigned wanted = blks + taboo_blks;
 	unsigned int free_blocks;
 
-	if (gfs2_assert_warn(sdp, blks) ||
-	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
-		return -EINVAL;
 	atomic_add(blks, &sdp->sd_log_blks_needed);
-retry:
-	free_blocks = atomic_read(&sdp->sd_log_blks_free);
-	if (unlikely(free_blocks <= wanted)) {
-		do {
-			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
-					TASK_UNINTERRUPTIBLE);
+	for (;;) {
+		if (current != sdp->sd_logd_process)
 			wake_up(&sdp->sd_logd_waitq);
-			did_wait = 1;
-			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
-				io_schedule();
-			free_blocks = atomic_read(&sdp->sd_log_blks_free);
-		} while(free_blocks <= wanted);
-		finish_wait(&sdp->sd_log_waitq, &wait);
+		io_wait_event(sdp->sd_log_waitq,
+			(free_blocks = atomic_read(&sdp->sd_log_blks_free),
+			 free_blocks >= wanted));
+		do {
+			if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
+					       &free_blocks,
+					       free_blocks - blks))
+				goto reserved;
+		} while (free_blocks >= wanted);
 	}
-	atomic_inc(&sdp->sd_reserving_log);
-	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
-			   free_blocks - blks) != free_blocks) {
-		if (atomic_dec_and_test(&sdp->sd_reserving_log))
-			wake_up(&sdp->sd_reserving_log_wait);
-		goto retry;
-	}
-	atomic_sub(blks, &sdp->sd_log_blks_needed);
 
+reserved:
 	trace_gfs2_log_blocks(sdp, -blks);
-
-	/*
-	 * If we waited, then so might others, wake them up _after_ we get
-	 * our share of the log.
-	 */
-	if (unlikely(did_wait))
+	if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
 		wake_up(&sdp->sd_log_waitq);
+}
 
-	down_read(&sdp->sd_log_flush_lock);
-	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
-		gfs2_log_release(sdp, blks);
-		ret = -EROFS;
+/**
+ * gfs2_log_try_reserve - Try to make a log reservation
+ * @sdp: The GFS2 superblock
+ * @tr: The transaction
+ * @extra_revokes: The number of additional revokes reserved (output)
+ *
+ * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be
+ * held for correct revoke accounting.
+ */
+
+bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+			  unsigned int *extra_revokes)
+{
+	unsigned int blks = tr->tr_reserved;
+	unsigned int revokes = tr->tr_revokes;
+	unsigned int revoke_blks = 0;
+
+	*extra_revokes = 0;
+	if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
+		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
+		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
+		blks += revoke_blks;
 	}
-	if (atomic_dec_and_test(&sdp->sd_reserving_log))
-		wake_up(&sdp->sd_reserving_log_wait);
-	return ret;
+	if (!blks)
+		return true;
+	if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
+		return true;
+	if (!revoke_blks)
+		gfs2_log_release_revokes(sdp, revokes);
+	return false;
+}
+
+/**
+ * gfs2_log_reserve - Make a log reservation
+ * @sdp: The GFS2 superblock
+ * @tr: The transaction
+ * @extra_revokes: The number of additional revokes reserved (output)
+ *
+ * sdp->sd_log_flush_lock must not be held.
+ */
+
+void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+		      unsigned int *extra_revokes)
+{
+	unsigned int blks = tr->tr_reserved;
+	unsigned int revokes = tr->tr_revokes;
+	unsigned int revoke_blks = 0;
+
+	*extra_revokes = 0;
+	if (revokes) {
+		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
+		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
+		blks += revoke_blks;
+	}
+	__gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
 }
 
 /**
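The split into gfs2_log_try_reserve() and gfs2_log_reserve() gives callers a two-phase shape. Roughly how a transaction-begin path can compose them; this is a sketch under assumptions, not the literal __gfs2_trans_begin() code, and error handling is omitted:

	/* Sketch: try the non-blocking reservation while holding the shared
	 * flush lock; only if that fails, drop the lock (so logd can flush
	 * and advance the tail), block in gfs2_log_reserve(), and reacquire.
	 * Types and functions are the ones introduced in the hunk above. */
	static void reserve_transaction_space(struct gfs2_sbd *sdp,
					      struct gfs2_trans *tr)
	{
		unsigned int extra_revokes;

		if (!gfs2_log_try_reserve(sdp, tr, &extra_revokes)) {
			up_read(&sdp->sd_log_flush_lock);
			gfs2_log_reserve(sdp, tr, &extra_revokes);
			down_read(&sdp->sd_log_flush_lock);
		}
		/* extra_revokes is the rounding slack from whole revoke
		 * blocks; presumably it is credited back to the shared pool
		 * via gfs2_log_release_revokes(sdp, extra_revokes). */
	}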
@@ -507,24 +631,20 @@ static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer
 }
 
 /**
- * calc_reserved - Calculate the number of blocks to reserve when
- *                 refunding a transaction's unused buffers.
+ * calc_reserved - Calculate the number of blocks to keep reserved
  * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
- * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
- * all our journaled data buffers for journaled files (e.g. files in the
+ * metadata blocks (e.g. normal file I/O rewriting file time stamps) and
+ * all our journaled data blocks for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
- * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
- * will count it as free space (sd_log_blks_free) and corruption will follow.
+ * If we don't reserve enough space, corruption will follow.
 *
- * We can have metadata bufs and jdata bufs in the same journal.  So each
- * type gets its own log header, for which we need to reserve a block.
- * In fact, each type has the potential for needing more than one header
- * in cases where we have more buffers than will fit on a journal page.
+ * We can have metadata blocks and jdata blocks in the same journal.  Each
+ * type gets its own log descriptor, for which we need to reserve a block.
+ * In fact, each type has the potential for needing more than one log descriptor
+ * in cases where we have more blocks than will fit in a log descriptor.
 * Metadata journal entries take up half the space of journaled buffer entries.
- * Thus, metadata entries have buf_limit (502) and journaled buffers have
- * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
@@ -533,59 +653,29 @@ static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer
 */
 static unsigned int calc_reserved(struct gfs2_sbd *sdp)
 {
-	unsigned int reserved = 0;
-	unsigned int mbuf;
-	unsigned int dbuf;
+	unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
+	unsigned int blocks;
 	struct gfs2_trans *tr = sdp->sd_log_tr;
 
 	if (tr) {
-		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
-		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
-		reserved = mbuf + dbuf;
-		/* Account for header blocks */
-		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
-		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
+		blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm;
+		reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
+		blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
+		reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
 	}
-
-	if (sdp->sd_log_committed_revoke > 0)
-		reserved += gfs2_struct2blk(sdp, sdp->sd_log_committed_revoke);
-	/* One for the overall header */
-	if (reserved)
-		reserved++;
 	return reserved;
 }
 
-static unsigned int current_tail(struct gfs2_sbd *sdp)
+static void log_pull_tail(struct gfs2_sbd *sdp)
 {
-	struct gfs2_trans *tr;
-	unsigned int tail;
-
-	spin_lock(&sdp->sd_ail_lock);
-
-	if (list_empty(&sdp->sd_ail1_list)) {
-		tail = sdp->sd_log_head;
-	} else {
-		tr = list_last_entry(&sdp->sd_ail1_list, struct gfs2_trans,
-				tr_list);
-		tail = tr->tr_first;
-	}
-
-	spin_unlock(&sdp->sd_ail_lock);
-
-	return tail;
-}
-
-static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
-{
-	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
+	unsigned int new_tail = sdp->sd_log_flush_tail;
+	unsigned int dist;
 
+	if (new_tail == sdp->sd_log_tail)
+		return;
+	dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
 	ail2_empty(sdp, new_tail);
-
-	atomic_add(dist, &sdp->sd_log_blks_free);
-	trace_gfs2_log_blocks(sdp, dist);
-	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
-			     sdp->sd_jdesc->jd_blocks);
-
+	gfs2_log_release(sdp, dist);
 	sdp->sd_log_tail = new_tail;
 }
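To make the new calc_reserved() concrete, a worked example using the buf_limit (502) and databuf_limit (251) figures quoted in the comment above (those correspond to a 4 KiB block size; other block sizes give other limits):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* Worked example: GFS2_LOG_FLUSH_MIN_BLOCKS as the floor, plus, per
	 * buffer type, the blocks themselves and one log descriptor per
	 * 'limit' blocks. */
	int main(void)
	{
		unsigned int reserved = 4;		/* GFS2_LOG_FLUSH_MIN_BLOCKS */
		unsigned int mbuf = 600, dbuf = 100;	/* example transaction */

		reserved += mbuf + DIV_ROUND_UP(mbuf, 502);
		reserved += dbuf + DIV_ROUND_UP(dbuf, 251);
		printf("reserved = %u blocks\n", reserved); /* 4+600+2+100+1 = 707 */
		return 0;
	}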
@@ -698,7 +788,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
 }
 
 /**
- * gfs2_write_revokes - Add as many revokes to the system transaction as we can
+ * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
  * @sdp: The GFS2 superblock
 *
 * Our usual strategy is to defer writing revokes as much as we can in the hope
@@ -709,38 +799,14 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
 * been written back.  This will basically come at no cost now, and will save
 * us from having to keep track of those blocks on the AIL2 list later.
 */
-void gfs2_write_revokes(struct gfs2_sbd *sdp)
+void gfs2_flush_revokes(struct gfs2_sbd *sdp)
 {
 	/* number of revokes we still have room for */
-	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
+	unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);
 
 	gfs2_log_lock(sdp);
-	while (sdp->sd_log_num_revoke > max_revokes)
-		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
-	max_revokes -= sdp->sd_log_num_revoke;
-	if (!sdp->sd_log_num_revoke) {
-		atomic_dec(&sdp->sd_log_blks_free);
-		/* If no blocks have been reserved, we need to also
-		 * reserve a block for the header */
-		if (!sdp->sd_log_blks_reserved) {
-			atomic_dec(&sdp->sd_log_blks_free);
-			trace_gfs2_log_blocks(sdp, -2);
-		} else {
-			trace_gfs2_log_blocks(sdp, -1);
-		}
-	}
 	gfs2_ail1_empty(sdp, max_revokes);
 	gfs2_log_unlock(sdp);
-
-	if (!sdp->sd_log_num_revoke) {
-		atomic_inc(&sdp->sd_log_blks_free);
-		if (!sdp->sd_log_blks_reserved) {
-			atomic_inc(&sdp->sd_log_blks_free);
-			trace_gfs2_log_blocks(sdp, 2);
-		} else {
-			trace_gfs2_log_blocks(sdp, 1);
-		}
-	}
 }
 
 /**
@@ -769,7 +835,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 	u64 dblock;
 
 	if (gfs2_withdrawn(sdp))
-		goto out;
+		return;
 
 	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 	lh = page_address(page);
@@ -822,10 +888,8 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 		    sb->s_blocksize - LH_V1_SIZE - 4);
 	lh->lh_crc = cpu_to_be32(crc);
 
-	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
-	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
-out:
-	log_flush_wait(sdp);
+	gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
+	gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
 }
 
 /**
@@ -838,25 +902,24 @@ out:
 
 static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 {
-	unsigned int tail;
 	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 
 	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
-	tail = current_tail(sdp);
 
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 		gfs2_ordered_wait(sdp);
 		log_flush_wait(sdp);
 		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
 	}
-	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
-	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
-			      sdp->sd_log_flush_head, flags, op_flags);
+	sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
+	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
+			      sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
+			      flags, op_flags);
 	gfs2_log_incr_head(sdp);
-
-	if (sdp->sd_log_tail != tail)
-		log_pull_tail(sdp, tail);
+	log_flush_wait(sdp);
+	log_pull_tail(sdp);
+	gfs2_log_update_head(sdp);
 }
 
 /**
@@ -956,10 +1019,15 @@ static void trans_drain(struct gfs2_trans *tr)
 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 {
 	struct gfs2_trans *tr = NULL;
+	unsigned int reserved_blocks = 0, used_blocks = 0;
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+	unsigned int first_log_head;
+	unsigned int reserved_revokes = 0;
 
 	down_write(&sdp->sd_log_flush_lock);
-	trace_gfs2_log_flush(sdp, 1, flags);
 
+repeat:
 	/*
 	 * Do this check while holding the log_flush_lock to prevent new
 	 * buffers from being added to the ail via gfs2_pin()
@@ -970,28 +1038,47 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	/* Log might have been flushed while we waited for the flush lock */
 	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
 		goto out;
+	trace_gfs2_log_flush(sdp, 1, flags);
+
+	first_log_head = sdp->sd_log_head;
+	sdp->sd_log_flush_head = first_log_head;
+
+	tr = sdp->sd_log_tr;
+	if (tr || sdp->sd_log_num_revoke) {
+		if (reserved_blocks)
+			gfs2_log_release(sdp, reserved_blocks);
+		reserved_blocks = sdp->sd_log_blks_reserved;
+		reserved_revokes = sdp->sd_log_num_revoke;
+		if (tr) {
+			sdp->sd_log_tr = NULL;
+			tr->tr_first = first_log_head;
+			if (unlikely (state == SFS_FROZEN)) {
+				if (gfs2_assert_withdraw_delayed(sdp,
+				       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
+					goto out_withdraw;
+			}
+		}
+	} else if (!reserved_blocks) {
+		unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
+
+		reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
+		if (current == sdp->sd_logd_process)
+			taboo_blocks = 0;
+
+		if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
+			up_write(&sdp->sd_log_flush_lock);
+			__gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
+			down_write(&sdp->sd_log_flush_lock);
+			goto repeat;
+		}
+		BUG_ON(sdp->sd_log_num_revoke);
+	}
 
 	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
 		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 
-	sdp->sd_log_flush_head = sdp->sd_log_head;
-	tr = sdp->sd_log_tr;
-	if (tr) {
-		sdp->sd_log_tr = NULL;
-		tr->tr_first = sdp->sd_log_flush_head;
-		if (unlikely (state == SFS_FROZEN))
-			if (gfs2_assert_withdraw_delayed(sdp,
-			       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
-				goto out_withdraw;
-	}
-
 	if (unlikely(state == SFS_FROZEN))
-		if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
+		if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
 			goto out_withdraw;
-	if (gfs2_assert_withdraw_delayed(sdp,
-			sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
-		goto out_withdraw;
 
 	gfs2_ordered_write(sdp);
 	if (gfs2_withdrawn(sdp))
@@ -999,16 +1086,13 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	lops_before_commit(sdp, tr);
 	if (gfs2_withdrawn(sdp))
 		goto out_withdraw;
-	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
+	gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
 	if (gfs2_withdrawn(sdp))
 		goto out_withdraw;
 
 	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
-		log_flush_wait(sdp);
 		log_write_header(sdp, flags);
-	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
-		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
-		trace_gfs2_log_blocks(sdp, -1);
+	} else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
 		log_write_header(sdp, flags);
 	}
 	if (gfs2_withdrawn(sdp))
@@ -1016,9 +1100,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	lops_after_commit(sdp, tr);
 
 	gfs2_log_lock(sdp);
-	sdp->sd_log_head = sdp->sd_log_flush_head;
 	sdp->sd_log_blks_reserved = 0;
-	sdp->sd_log_committed_revoke = 0;
 
 	spin_lock(&sdp->sd_ail_lock);
 	if (tr && !list_empty(&tr->tr_ail1_list)) {
@@ -1033,10 +1115,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 		empty_ail1_list(sdp);
 		if (gfs2_withdrawn(sdp))
 			goto out_withdraw;
-		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
-		trace_gfs2_log_blocks(sdp, -1);
 		log_write_header(sdp, flags);
-		sdp->sd_log_head = sdp->sd_log_flush_head;
 	}
 	if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
 		     GFS2_LOG_HEAD_FLUSH_FREEZE))
@@ -1046,12 +1125,22 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	}
 
 out_end:
-	trace_gfs2_log_flush(sdp, 0, flags);
+	used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
+	reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
+	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
+	gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs == sdp->sd_ldptrs);
+	if (reserved_revokes > sdp->sd_ldptrs)
+		reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
 out:
+	if (used_blocks != reserved_blocks) {
+		gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
+		gfs2_log_release(sdp, reserved_blocks - used_blocks);
+	}
 	up_write(&sdp->sd_log_flush_lock);
 	gfs2_trans_free(sdp, tr);
 	if (gfs2_withdrawing(sdp))
 		gfs2_withdraw(sdp);
+	trace_gfs2_log_flush(sdp, 0, flags);
 	return;
 
 out_withdraw:
@@ -1087,8 +1176,8 @@ static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
 	old->tr_num_databuf_new	+= new->tr_num_databuf_new;
 	old->tr_num_buf_rm	+= new->tr_num_buf_rm;
 	old->tr_num_databuf_rm	+= new->tr_num_databuf_rm;
+	old->tr_revokes		+= new->tr_revokes;
 	old->tr_num_revoke	+= new->tr_num_revoke;
-	old->tr_num_revoke_rm	+= new->tr_num_revoke_rm;
 
 	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
 	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
@@ -1110,20 +1199,17 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	if (sdp->sd_log_tr) {
 		gfs2_merge_trans(sdp, tr);
 	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
-		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
+		gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
 		sdp->sd_log_tr = tr;
 		set_bit(TR_ATTACHED, &tr->tr_flags);
 	}
 
-	sdp->sd_log_committed_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
 	reserved = calc_reserved(sdp);
 	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
 	gfs2_assert_withdraw(sdp, maxres >= reserved);
 	unused = maxres - reserved;
-	atomic_add(unused, &sdp->sd_log_blks_free);
-	trace_gfs2_log_blocks(sdp, unused);
-	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
-			     sdp->sd_jdesc->jd_blocks);
+	if (unused)
+		gfs2_log_release(sdp, unused);
 	sdp->sd_log_blks_reserved = reserved;
 
 	gfs2_log_unlock(sdp);
@@ -1166,15 +1252,11 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
 	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
 
-	sdp->sd_log_flush_head = sdp->sd_log_head;
-
 	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
+	log_pull_tail(sdp);
 
 	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
 	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
-
-	sdp->sd_log_head = sdp->sd_log_flush_head;
-	sdp->sd_log_tail = sdp->sd_log_head;
 }
 
 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
@@ -1208,7 +1290,6 @@ int gfs2_logd(void *data)
 	struct gfs2_sbd *sdp = data;
 	unsigned long t = 1;
 	DEFINE_WAIT(wait);
-	bool did_flush;
 
 	while (!kthread_should_stop()) {
 
@@ -1227,12 +1308,10 @@ int gfs2_logd(void *data)
 			continue;
 		}
 
-		did_flush = false;
 		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
 			gfs2_ail1_empty(sdp, 0);
 			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
-				       GFS2_LFC_LOGD_JFLUSH_REQD);
-			did_flush = true;
+						  GFS2_LFC_LOGD_JFLUSH_REQD);
 		}
 
 		if (gfs2_ail_flush_reqd(sdp)) {
@@ -1240,13 +1319,9 @@ int gfs2_logd(void *data)
 			gfs2_ail1_wait(sdp);
 			gfs2_ail1_empty(sdp, 0);
 			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
-				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
-			did_flush = true;
+						  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
 		}
 
-		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
-			wake_up(&sdp->sd_log_waitq);
-
 		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
 
 		try_to_freeze();
fs/gfs2/log.h

@@ -13,6 +13,13 @@
 #include "incore.h"
 #include "inode.h"
 
+/*
+ * The minimum amount of log space required for a log flush is one block for
+ * revokes and one block for the log header. Log flushes other than
+ * GFS2_LOG_HEAD_FLUSH_NORMAL may write one or two more log headers.
+ */
+#define GFS2_LOG_FLUSH_MIN_BLOCKS 4
+
 /**
  * gfs2_log_lock - acquire the right to mess with the log manager
  * @sdp: the filesystem
@@ -43,7 +50,9 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
 	if (++value == sdp->sd_jdesc->jd_blocks) {
 		value = 0;
 	}
-	sdp->sd_log_head = sdp->sd_log_tail = value;
+	sdp->sd_log_tail = value;
+	sdp->sd_log_flush_tail = value;
+	sdp->sd_log_head = value;
 }
 
 static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
@@ -64,8 +73,13 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
 extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
 extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
 extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+extern bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
+extern void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
 extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
-extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
+extern bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+				 unsigned int *extra_revokes);
+extern void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+			     unsigned int *extra_revokes);
 extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
				  u64 seq, u32 tail, u32 lblock, u32 flags,
				  int op_flags);
@@ -78,6 +92,6 @@ extern void log_flush_wait(struct gfs2_sbd *sdp);
 extern int gfs2_logd(void *data);
 extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
 extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
-extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
fs/gfs2/lops.c

@@ -76,15 +76,20 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
 
+	rgrp_lock_local(rgd);
 	if (bi->bi_clone == NULL)
-		return;
+		goto out;
 	if (sdp->sd_args.ar_discard)
 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
 	memcpy(bi->bi_clone + bi->bi_offset,
 	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
 	clear_bit(GBF_FULL, &bi->bi_flags);
 	rgd->rd_free_clone = rgd->rd_free;
+	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
 	rgd->rd_extfail_pt = rgd->rd_free;
+
+out:
+	rgrp_unlock_local(rgd);
 }
 
 /**
@@ -322,17 +327,18 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
 * then add the page segment to that.
 */
 
-void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
-		    unsigned size, unsigned offset, u64 blkno)
+void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+		    struct page *page, unsigned size, unsigned offset,
+		    u64 blkno)
 {
 	struct bio *bio;
 	int ret;
 
-	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
+	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
 	ret = bio_add_page(bio, page, size, offset);
 	if (ret == 0) {
-		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
+		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
 		ret = bio_add_page(bio, page, size, offset);
 		WARN_ON(ret == 0);
@@ -355,7 +361,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
 
 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
 	gfs2_log_incr_head(sdp);
-	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
+	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
+		       bh_offset(bh), dblock);
 }
 
 /**
@@ -369,14 +376,14 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
 * the page may be freed at any time.
 */
 
-void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
+static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
 {
 	struct super_block *sb = sdp->sd_vfs;
 	u64 dblock;
 
 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
 	gfs2_log_incr_head(sdp);
-	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
+	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
 }
 
 /**
@@ -845,7 +852,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	struct page *page;
 	unsigned int length;
 
-	gfs2_write_revokes(sdp);
+	gfs2_flush_revokes(sdp);
 	if (!sdp->sd_log_num_revoke)
 		return;
 
@@ -857,7 +864,6 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 		sdp->sd_log_num_revoke--;
 
 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-
 			gfs2_log_write_page(sdp, page);
 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 			mh = page_address(page);
fs/gfs2/lops.h

@@ -10,37 +10,24 @@
 #include <linux/list.h>
 #include "incore.h"
 
-#define BUF_OFFSET \
-	((sizeof(struct gfs2_log_descriptor) + sizeof(__be64) - 1) & \
-	 ~(sizeof(__be64) - 1))
-#define DATABUF_OFFSET \
-	((sizeof(struct gfs2_log_descriptor) + (2 * sizeof(__be64) - 1)) & \
-	 ~(2 * sizeof(__be64) - 1))
-
 extern const struct gfs2_log_operations *gfs2_log_ops[];
 extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
 extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
-extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
-			   unsigned size, unsigned offset, u64 blkno);
-extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
+extern void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+			   struct page *page, unsigned size, unsigned offset,
+			   u64 blkno);
 extern void gfs2_log_submit_bio(struct bio **biop, int opf);
 extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
			   struct gfs2_log_header_host *head, bool keep_cache);
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
-	unsigned int limit;
-
-	limit = (sdp->sd_sb.sb_bsize - BUF_OFFSET) / sizeof(__be64);
-	return limit;
+	return sdp->sd_ldptrs;
 }
 
 static inline unsigned int databuf_limit(struct gfs2_sbd *sdp)
 {
-	unsigned int limit;
-
-	limit = (sdp->sd_sb.sb_bsize - DATABUF_OFFSET) / (2 * sizeof(__be64));
-	return limit;
+	return sdp->sd_ldptrs / 2;
 }
 
 static inline void lops_before_commit(struct gfs2_sbd *sdp,
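Why databuf_limit() is half of buf_limit(): a metadata entry consumes one __be64 pointer in a log descriptor, while a journaled-data entry consumes two. A sketch of the simplified limits (the real sd_ldptrs is computed from the block size at mount; the roughly 500/250 split matches the 502/251 figures quoted in the calc_reserved() comment in log.c):

	/* Sketch: per-descriptor entry capacities derived from ldptrs, the
	 * number of __be64 pointers fitting in one log descriptor block. */
	static inline unsigned int buf_limit_ex(unsigned int ldptrs)
	{
		return ldptrs;		/* one pointer per metadata block */
	}

	static inline unsigned int databuf_limit_ex(unsigned int ldptrs)
	{
		return ldptrs / 2;	/* two pointers per journaled data block */
	}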
fs/gfs2/main.c

@@ -98,7 +98,7 @@ static int __init init_gfs2_fs(void)
 	error = -ENOMEM;
 	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
-					      0, 0,
+					      0, SLAB_RECLAIM_ACCOUNT,
					      gfs2_init_glock_once);
 	if (!gfs2_glock_cachep)
 		goto fail_cachep1;
@@ -134,7 +134,7 @@ static int __init init_gfs2_fs(void)
 
 	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
-					       0, 0, NULL);
+					       0, SLAB_RECLAIM_ACCOUNT, NULL);
 	if (!gfs2_quotad_cachep)
 		goto fail_cachep6;
 
fs/gfs2/ops_fstype.c

@@ -136,8 +136,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
 	init_rwsem(&sdp->sd_log_flush_lock);
 	atomic_set(&sdp->sd_log_in_flight, 0);
-	atomic_set(&sdp->sd_reserving_log, 0);
-	init_waitqueue_head(&sdp->sd_reserving_log_wait);
 	init_waitqueue_head(&sdp->sd_log_flush_wait);
 	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 	mutex_init(&sdp->sd_freeze_mutex);
@@ -171,7 +169,8 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 		return -EINVAL;
 	}
 
-	if (sb->sb_fs_format != GFS2_FORMAT_FS ||
+	if (sb->sb_fs_format < GFS2_FS_FORMAT_MIN ||
+	    sb->sb_fs_format > GFS2_FS_FORMAT_MAX ||
	    sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
 		fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
 		return -EINVAL;
@@ -179,7 +178,7 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 
 	if (sb->sb_bsize < 512 || sb->sb_bsize > PAGE_SIZE ||
	    (sb->sb_bsize & (sb->sb_bsize - 1))) {
-		pr_warn("Invalid superblock size\n");
+		pr_warn("Invalid block size\n");
 		return -EINVAL;
 	}
 
@@ -317,6 +316,13 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
			    sizeof(struct gfs2_meta_header))
		* GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
 
+	/*
+	 * We always keep at least one block reserved for revokes in
+	 * transactions.  This greatly simplifies allocating additional
+	 * revoke blocks.
+	 */
+	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
+
 	/* Compute maximum reservation required to add a entry to a directory */
 
 	hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
@@ -488,6 +494,19 @@ static int init_sb(struct gfs2_sbd *sdp, int silent)
 		goto out;
 	}
 
+	switch(sdp->sd_sb.sb_fs_format) {
+	case GFS2_FS_FORMAT_MAX:
+		sb->s_xattr = gfs2_xattr_handlers_max;
+		break;
+
+	case GFS2_FS_FORMAT_MIN:
+		sb->s_xattr = gfs2_xattr_handlers_min;
+		break;
+
+	default:
+		BUG();
+	}
+
 	/* Set up the buffer cache and SB for real */
 	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
 		ret = -EINVAL;
@@ -1032,13 +1051,14 @@ hostdata_error:
 	}
 
 	if (lm->lm_mount == NULL) {
-		fs_info(sdp, "Now mounting FS...\n");
+		fs_info(sdp, "Now mounting FS (format %u)...\n", sdp->sd_sb.sb_fs_format);
 		complete_all(&sdp->sd_locking_init);
 		return 0;
 	}
 	ret = lm->lm_mount(sdp, table);
 	if (ret == 0)
-		fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+		fs_info(sdp, "Joined cluster. Now mounting FS (format %u)...\n",
+			sdp->sd_sb.sb_fs_format);
 	complete_all(&sdp->sd_locking_init);
 	return ret;
 }
@@ -1084,6 +1104,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	int silent = fc->sb_flags & SB_SILENT;
 	struct gfs2_sbd *sdp;
 	struct gfs2_holder mount_gh;
+	struct gfs2_holder freeze_gh;
 	int error;
 
 	sdp = init_sbd(sb);
@@ -1107,7 +1128,6 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	sb->s_op = &gfs2_super_ops;
 	sb->s_d_op = &gfs2_dops;
 	sb->s_export_op = &gfs2_export_ops;
-	sb->s_xattr = gfs2_xattr_handlers;
 	sb->s_qcop = &gfs2_quotactl_ops;
 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
@@ -1156,6 +1176,10 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	if (error)
 		goto fail_locking;
 
+	/* Turn rgrplvb on by default if fs format is recent enough */
+	if (!sdp->sd_args.ar_got_rgrplvb && sdp->sd_sb.sb_fs_format > 1801)
+		sdp->sd_args.ar_rgrplvb = 1;
+
 	error = wait_on_journal(sdp);
 	if (error)
 		goto fail_sb;
@@ -1195,25 +1219,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 		goto fail_per_node;
 	}
||||
if (sb_rdonly(sb)) {
|
||||
struct gfs2_holder freeze_gh;
|
||||
error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
|
||||
if (error)
|
||||
goto fail_per_node;
|
||||
|
||||
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
|
||||
LM_FLAG_NOEXP | GL_EXACT,
|
||||
&freeze_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't make FS RO: %d\n", error);
|
||||
goto fail_per_node;
|
||||
}
|
||||
gfs2_glock_dq_uninit(&freeze_gh);
|
||||
} else {
|
||||
if (!sb_rdonly(sb))
|
||||
error = gfs2_make_fs_rw(sdp);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't make FS RW: %d\n", error);
|
||||
goto fail_per_node;
|
||||
}
|
||||
}
|
||||
|
||||
gfs2_freeze_unlock(&freeze_gh);
|
||||
if (error) {
|
||||
fs_err(sdp, "can't make FS RW: %d\n", error);
|
||||
goto fail_per_node;
|
||||
}
|
||||
gfs2_glock_dq_uninit(&mount_gh);
|
||||
gfs2_online_uevent(sdp);
|
||||
return 0;
|
||||
|
@ -1456,6 +1473,7 @@ static int gfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
|
|||
break;
|
||||
case Opt_rgrplvb:
|
||||
args->ar_rgrplvb = result.boolean;
|
||||
args->ar_got_rgrplvb = 1;
|
||||
break;
|
||||
case Opt_loccookie:
|
||||
args->ar_loccookie = result.boolean;
|
||||
|
@ -1514,6 +1532,12 @@ static int gfs2_reconfigure(struct fs_context *fc)
|
|||
fc->sb_flags |= SB_RDONLY;
|
||||
|
||||
if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
|
||||
struct gfs2_holder freeze_gh;
|
||||
|
||||
error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
|
||||
if (error)
|
||||
return -EINVAL;
|
||||
|
||||
if (fc->sb_flags & SB_RDONLY) {
|
||||
error = gfs2_make_fs_ro(sdp);
|
||||
if (error)
|
||||
|
@ -1523,6 +1547,7 @@ static int gfs2_reconfigure(struct fs_context *fc)
|
|||
if (error)
|
||||
errorfc(fc, "unable to remount read-write");
|
||||
}
|
||||
gfs2_freeze_unlock(&freeze_gh);
|
||||
}
|
||||
sdp->sd_args = *newargs;
|
||||
|
||||
|
|
|
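The superblock check above replaces the single-version test with a [GFS2_FS_FORMAT_MIN, GFS2_FS_FORMAT_MAX] range, and init_sb() then selects a per-format xattr handler table. A standalone model of that version gating (the profile strings are illustrative only, not the real handler tables):

#include <stdbool.h>

#define FS_FORMAT_MIN 1801
#define FS_FORMAT_MAX 1802

static bool format_supported(unsigned int fmt)
{
        return fmt >= FS_FORMAT_MIN && fmt <= FS_FORMAT_MAX;
}

static const char *xattr_profile(unsigned int fmt)
{
        /* format 1802 adds trusted xattrs; 1801 keeps the older set */
        return fmt >= 1802 ? "user+security+trusted" : "user+security";
}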
@@ -470,9 +470,7 @@ void gfs2_recover_func(struct work_struct *work)
 
 	/* Acquire a shared hold on the freeze lock */
 
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-				   LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
-				   GL_EXACT, &thaw_gh);
+	error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
 	if (error)
 		goto fail_gunlock_ji;
 
@@ -507,22 +505,24 @@ void gfs2_recover_func(struct work_struct *work)
 
 		/* We take the sd_log_flush_lock here primarily to prevent log
 		 * flushes and simultaneous journal replays from stomping on
-		 * each other wrt sd_log_bio. */
+		 * each other wrt jd_log_bio. */
 		down_read(&sdp->sd_log_flush_lock);
 		for (pass = 0; pass < 2; pass++) {
 			lops_before_scan(jd, &head, pass);
 			error = foreach_descriptor(jd, head.lh_tail,
 						   head.lh_blkno, pass);
 			lops_after_scan(jd, error, pass);
-			if (error)
+			if (error) {
+				up_read(&sdp->sd_log_flush_lock);
 				goto fail_gunlock_thaw;
+			}
 		}
 
 		recover_local_statfs(jd, &head);
 		clean_journal(jd, &head);
 		up_read(&sdp->sd_log_flush_lock);
 
-		gfs2_glock_dq_uninit(&thaw_gh);
+		gfs2_freeze_unlock(&thaw_gh);
 		t_rep = ktime_get();
 		fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
 			"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
 
@@ -544,7 +544,7 @@ void gfs2_recover_func(struct work_struct *work)
 	goto done;
 
 fail_gunlock_thaw:
-	gfs2_glock_dq_uninit(&thaw_gh);
+	gfs2_freeze_unlock(&thaw_gh);
 fail_gunlock_ji:
 	if (jlocked) {
 		gfs2_glock_dq_uninit(&ji_gh);
 
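Several hunks in this series replace open-coded freeze-glock acquisition with gfs2_freeze_lock()/gfs2_freeze_unlock(). The helpers themselves are outside this excerpt; judging from the call sites they replace, they plausibly wrap the shared, non-expirable freeze glock roughly like this sketch (it assumes gfs2's internal glock API from incore.h/glock.h; the real helpers live in fs/gfs2/util.c and may differ in detail):

static int gfs2_freeze_lock_sketch(struct gfs2_sbd *sdp,
				   struct gfs2_holder *freeze_gh, int flags)
{
	/* shared, non-expirable hold on the cluster-wide freeze glock */
	return gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
				  LM_FLAG_NOEXP | GL_EXACT | flags, freeze_gh);
}

static void gfs2_freeze_unlock_sketch(struct gfs2_holder *freeze_gh)
{
	if (gfs2_holder_initialized(freeze_gh))
		gfs2_glock_dq_uninit(freeze_gh);
}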
fs/gfs2/rgrp.c (442 lines changed)
@@ -36,6 +36,24 @@
 #define BFITNOENT ((u32)~0)
 #define NO_BLOCK ((u64)~0)
 
+struct gfs2_rbm {
+	struct gfs2_rgrpd *rgd;
+	u32 offset;		/* The offset is bitmap relative */
+	int bii;		/* Bitmap index */
+};
+
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+	return rbm->rgd->rd_bits + rbm->bii;
+}
+
+static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
+{
+	BUG_ON(rbm->offset >= rbm->rgd->rd_data);
+	return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+	       rbm->offset;
+}
+
 /*
  * These routines are used by the resource group routines (rgrp.c)
  * to keep track of block allocation. Each block is represented by two
 
@@ -61,7 +79,7 @@ static const char valid_change[16] = {
 };
 
 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
-			 const struct gfs2_inode *ip, bool nowrap);
+			 struct gfs2_blkreserv *rs, bool nowrap);
 
 
 /**
 
@@ -175,7 +193,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
 
 /**
  * rs_cmp - multi-block reservation range compare
- * @blk: absolute file system block number of the new reservation
+ * @start: start of the new reservation
  * @len: number of blocks in the new reservation
  * @rs: existing reservation to compare against
 *
 
@@ -183,13 +201,11 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
  *         -1 if the block range is before the start of the reservation
  *          0 if the block range overlaps with the reservation
  */
-static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
+static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs)
 {
-	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
-
-	if (blk >= startblk + rs->rs_free)
+	if (start >= rs->rs_start + rs->rs_requested)
 		return 1;
-	if (blk + len - 1 < startblk)
+	if (rs->rs_start >= start + len)
 		return -1;
 	return 0;
 }
 
@@ -277,29 +293,38 @@ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 }
 
 /**
- * gfs2_rbm_incr - increment an rbm structure
+ * gfs2_rbm_add - add a number of blocks to an rbm
  * @rbm: The rbm with rgd already set correctly
+ * @blocks: The number of blocks to add to rpm
 *
- * This function takes an existing rbm structure and increments it to the next
- * viable block offset.
- *
- * Returns: If incrementing the offset would cause the rbm to go past the
- *          end of the rgrp, true is returned, otherwise false.
+ * This function takes an existing rbm structure and adds a number of blocks to
+ * it.
+ *
+ * Returns: True if the new rbm would point past the end of the rgrp.
  */
 
-static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+static bool gfs2_rbm_add(struct gfs2_rbm *rbm, u32 blocks)
 {
-	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
-		rbm->offset++;
+	struct gfs2_rgrpd *rgd = rbm->rgd;
+	struct gfs2_bitmap *bi = rgd->rd_bits + rbm->bii;
+
+	if (rbm->offset + blocks < bi->bi_blocks) {
+		rbm->offset += blocks;
 		return false;
 	}
-	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
-		return true;
+	blocks -= bi->bi_blocks - rbm->offset;
 
-	rbm->offset = 0;
-	rbm->bii++;
-	return false;
+	for(;;) {
+		bi++;
+		if (bi == rgd->rd_bits + rgd->rd_length)
+			return true;
+		if (blocks < bi->bi_blocks) {
+			rbm->offset = blocks;
+			rbm->bii = bi - rgd->rd_bits;
+			return false;
+		}
+		blocks -= bi->bi_blocks;
+	}
 }
 
 /**
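gfs2_rbm_add() generalizes the old single-step increment to a jump of N blocks across bitmaps of unequal sizes. The arithmetic is easy to get wrong at segment boundaries, so here is a self-contained model of the same walk that can be tested in isolation:

#include <stdbool.h>
#include <stdint.h>

struct rbm_model {
        int bii;         /* bitmap index */
        uint32_t offset; /* offset within that bitmap */
};

/* Advance the position by `blocks`, spilling into later bitmaps as
 * needed; returns true if the position would pass the last bitmap. */
static bool rbm_add(struct rbm_model *rbm, uint32_t blocks,
                    const uint32_t *bi_blocks, int nr_bitmaps)
{
        if (rbm->offset + blocks < bi_blocks[rbm->bii]) {
                rbm->offset += blocks;
                return false;
        }
        blocks -= bi_blocks[rbm->bii] - rbm->offset;
        for (;;) {
                if (++rbm->bii == nr_bitmaps)
                        return true; /* ran past the resource group */
                if (blocks < bi_blocks[rbm->bii]) {
                        rbm->offset = blocks;
                        return false;
                }
                blocks -= bi_blocks[rbm->bii];
        }
}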
@@ -308,7 +333,8 @@ static bool gfs2_rbm_add(struct gfs2_rbm *rbm, u32 blocks)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
- * Returns: true if a non-free block is encountered
+ * Returns: true if a non-free block is encountered or the end of the resource
+ *          group is reached.
 */
 
 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
 
@@ -323,7 +349,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
 		(*len)--;
 		if (*len == 0)
 			return true;
-		if (gfs2_rbm_incr(rbm))
+		if (gfs2_rbm_add(rbm, 1))
 			return true;
 	}
 
@@ -595,10 +621,11 @@ static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs,
 {
 	struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res);
 
-	gfs2_print_dbg(seq, "%s B: n:%llu s:%llu b:%u f:%u\n", fs_id_buf,
+	gfs2_print_dbg(seq, "%s B: n:%llu s:%llu f:%u\n",
+		       fs_id_buf,
 		       (unsigned long long)ip->i_no_addr,
-		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
-		       rs->rs_rbm.offset, rs->rs_free);
+		       (unsigned long long)rs->rs_start,
+		       rs->rs_requested);
 }
 
 /**
 
@@ -613,33 +640,22 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
 	if (!gfs2_rs_active(rs))
 		return;
 
-	rgd = rs->rs_rbm.rgd;
+	rgd = rs->rs_rgd;
 	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
 	rb_erase(&rs->rs_node, &rgd->rd_rstree);
 	RB_CLEAR_NODE(&rs->rs_node);
 
-	if (rs->rs_free) {
-		u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
-				 rs->rs_free - 1;
-		struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
-		struct gfs2_bitmap *start, *last;
+	if (rs->rs_requested) {
+		/* return requested blocks to the rgrp */
+		BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested);
+		rs->rs_rgd->rd_requested -= rs->rs_requested;
 
-		/* return reserved blocks to the rgrp */
-		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
-		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
 		/* The rgrp extent failure point is likely not to increase;
 		   it will only do so if the freed blocks are somehow
 		   contiguous with a span of free blocks that follows. Still,
 		   it will force the number to be recalculated later. */
-		rgd->rd_extfail_pt += rs->rs_free;
-		rs->rs_free = 0;
-		if (gfs2_rbm_from_block(&last_rbm, last_block))
-			return;
-		start = rbm_bi(&rs->rs_rbm);
-		last = rbm_bi(&last_rbm);
-		do
-			clear_bit(GBF_FULL, &start->bi_flags);
-		while (start++ != last);
+		rgd->rd_extfail_pt += rs->rs_requested;
+		rs->rs_requested = 0;
 	}
 }
 
@@ -652,11 +668,11 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 {
 	struct gfs2_rgrpd *rgd;
 
-	rgd = rs->rs_rbm.rgd;
+	rgd = rs->rs_rgd;
 	if (rgd) {
 		spin_lock(&rgd->rd_rsspin);
 		__rs_deltree(rs);
-		BUG_ON(rs->rs_free);
+		BUG_ON(rs->rs_requested);
 		spin_unlock(&rgd->rd_rsspin);
 	}
 }
 
@@ -904,6 +920,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 	rgd->rd_data = be32_to_cpu(buf.ri_data);
 	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
 	spin_lock_init(&rgd->rd_rsspin);
+	mutex_init(&rgd->rd_mutex);
 
 	error = compute_bitstructs(rgd);
 	if (error)
 
@@ -1149,6 +1166,23 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
 	return count;
 }
 
+static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
+{
+	struct gfs2_bitmap *bi;
+	int x;
+
+	if (rgd->rd_free) {
+		for (x = 0; x < rgd->rd_length; x++) {
+			bi = rgd->rd_bits + x;
+			clear_bit(GBF_FULL, &bi->bi_flags);
+		}
+	} else {
+		for (x = 0; x < rgd->rd_length; x++) {
+			bi = rgd->rd_bits + x;
+			set_bit(GBF_FULL, &bi->bi_flags);
+		}
+	}
+}
+
 /**
  * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 
@@ -1192,11 +1226,11 @@ static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
 	}
 
 	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
-		for (x = 0; x < length; x++)
-			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
 		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
+		rgrp_set_bitmap_flags(rgd);
 		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
 		rgd->rd_free_clone = rgd->rd_free;
+		BUG_ON(rgd->rd_reserved);
 		/* max out the rgrp allocation failure point */
 		rgd->rd_extfail_pt = rgd->rd_free;
 	}
 
@@ -1244,7 +1278,11 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
 	if (rgd->rd_rgl->rl_unlinked == 0)
 		rgd->rd_flags &= ~GFS2_RDF_CHECK;
 	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
+	rgrp_set_bitmap_flags(rgd);
 	rgd->rd_free_clone = rgd->rd_free;
+	BUG_ON(rgd->rd_reserved);
+	/* max out the rgrp allocation failure point */
+	rgd->rd_extfail_pt = rgd->rd_free;
 	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
 	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
 	return 0;
 
@@ -1404,7 +1442,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
 
 	while (1) {
 
-		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+					 LM_FLAG_NODE_SCOPE, &gh);
 		if (ret)
 			goto out;
 
@@ -1412,9 +1451,11 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
 			/* Trim each bitmap in the rgrp */
 			for (x = 0; x < rgd->rd_length; x++) {
 				struct gfs2_bitmap *bi = rgd->rd_bits + x;
+				rgrp_lock_local(rgd);
 				ret = gfs2_rgrp_send_discards(sdp,
 						rgd->rd_data0, NULL, bi, minlen,
 						&amt);
+				rgrp_unlock_local(rgd);
 				if (ret) {
 					gfs2_glock_dq_uninit(&gh);
 					goto out;
 
@@ -1426,9 +1467,11 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
 			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
 			if (ret == 0) {
 				bh = rgd->rd_bits[0].bi_bh;
+				rgrp_lock_local(rgd);
 				rgd->rd_flags |= GFS2_RGF_TRIMMED;
 				gfs2_trans_add_meta(rgd->rd_gl, bh);
 				gfs2_rgrp_out(rgd, bh->b_data);
+				rgrp_unlock_local(rgd);
 				gfs2_trans_end(sdp);
 			}
 		}
 
@@ -1458,8 +1501,7 @@ static void rs_insert(struct gfs2_inode *ip)
 	struct rb_node **newn, *parent = NULL;
 	int rc;
 	struct gfs2_blkreserv *rs = &ip->i_res;
-	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
-	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
+	struct gfs2_rgrpd *rgd = rs->rs_rgd;
 
 	BUG_ON(gfs2_rs_active(rs));
 
@@ -1470,7 +1512,7 @@ static void rs_insert(struct gfs2_inode *ip)
 			rb_entry(*newn, struct gfs2_blkreserv, rs_node);
 
 		parent = *newn;
-		rc = rs_cmp(fsblock, rs->rs_free, cur);
+		rc = rs_cmp(rs->rs_start, rs->rs_requested, cur);
 		if (rc > 0)
 			newn = &((*newn)->rb_right);
 		else if (rc < 0)
 
@@ -1486,7 +1528,7 @@ static void rs_insert(struct gfs2_inode *ip)
 	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
 
 	/* Do our rgrp accounting for the reservation */
-	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
+	rgd->rd_requested += rs->rs_requested; /* blocks requested */
 	spin_unlock(&rgd->rd_rsspin);
 	trace_gfs2_rs(rs, TRACE_RS_INSERT);
 }
 
@@ -1507,9 +1549,9 @@ static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs)
 {
 	u32 tot_reserved, tot_free;
 
-	if (WARN_ON_ONCE(rgd->rd_reserved < rs->rs_free))
+	if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested))
 		return 0;
-	tot_reserved = rgd->rd_reserved - rs->rs_free;
+	tot_reserved = rgd->rd_requested - rs->rs_requested;
 
 	if (rgd->rd_free_clone < tot_reserved)
 		tot_reserved = 0;
 
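From this point on, the patch keeps two distinct per-rgrp counters: rd_requested sums the sizes of the multi-block reservations sitting in the rb-tree, while rd_reserved (reworked below) counts blocks actually claimed by allocations in flight. A self-contained toy model of how the two interact:

#include <assert.h>
#include <stdint.h>

struct rgrp_model {
        uint32_t free_clone; /* allocatable blocks in the rgrp */
        uint32_t requested;  /* sum of rs_requested over the tree */
        uint32_t reserved;   /* sum of claims by in-flight allocators */
};

static uint32_t blocks_available(const struct rgrp_model *rgd)
{
        assert(rgd->reserved <= rgd->free_clone);
        return rgd->free_clone - rgd->reserved;
}

/* claim up to target blocks, as gfs2_inplace_reserve() now does */
static uint32_t claim(struct rgrp_model *rgd, uint32_t target)
{
        uint32_t got = blocks_available(rgd);

        if (got > target)
                got = target;
        rgd->reserved += got;
        return got;
}

/* hand back the unused part of a claim, as gfs2_inplace_release() does */
static void release(struct rgrp_model *rgd, uint32_t unused)
{
        assert(rgd->reserved >= unused);
        rgd->reserved -= unused;
}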
@@ -1534,17 +1576,26 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
 	u64 goal;
 	struct gfs2_blkreserv *rs = &ip->i_res;
 	u32 extlen;
-	u32 free_blocks = rgd_free(rgd, rs);
+	u32 free_blocks, blocks_available;
 	int ret;
 	struct inode *inode = &ip->i_inode;
 
+	spin_lock(&rgd->rd_rsspin);
+	free_blocks = rgd_free(rgd, rs);
+	if (rgd->rd_free_clone < rgd->rd_requested)
+		free_blocks = 0;
+	blocks_available = rgd->rd_free_clone - rgd->rd_reserved;
+	if (rgd == rs->rs_rgd)
+		blocks_available += rs->rs_reserved;
+	spin_unlock(&rgd->rd_rsspin);
+
 	if (S_ISDIR(inode->i_mode))
 		extlen = 1;
 	else {
 		extlen = max_t(u32, atomic_read(&ip->i_sizehint), ap->target);
 		extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
 	}
-	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
+	if (free_blocks < extlen || blocks_available < extlen)
 		return;
 
 	/* Find bitmap block that contains bits for goal block */
 
@@ -1556,10 +1607,10 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
 	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
 		return;
 
-	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
+	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, &ip->i_res, true);
 	if (ret == 0) {
-		rs->rs_rbm = rbm;
-		rs->rs_free = extlen;
+		rs->rs_start = gfs2_rbm_to_block(&rbm);
+		rs->rs_requested = extlen;
 		rs_insert(ip);
 	} else {
 		if (goal == rgd->rd_last_alloc + rgd->rd_data0)
 
@@ -1572,7 +1623,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
- * @ip: Ignore any reservations for this inode
+ * @ignore_rs: Reservation to ignore
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 
@@ -1582,7 +1633,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
 
 static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
 				      u32 length,
-				      const struct gfs2_inode *ip)
+				      struct gfs2_blkreserv *ignore_rs)
 {
 	struct gfs2_blkreserv *rs;
 	struct rb_node *n;
 
@@ -1602,8 +1653,8 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
 	}
 
 	if (n) {
-		while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
-			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
+		while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) {
+			block = rs->rs_start + rs->rs_requested;
 			n = n->rb_right;
 			if (n == NULL)
 				break;
 
@@ -1618,7 +1669,7 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
 /**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
- * @ip: The inode for which we are searching for blocks
+ * @rs: Our own reservation
 * @minext: The minimum extent length
 * @maxext: A pointer to the maximum extent structure
 *
 
@@ -1632,20 +1683,19 @@ static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
 */
 
 static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
-					     const struct gfs2_inode *ip,
+					     struct gfs2_blkreserv *rs,
 					     u32 minext,
 					     struct gfs2_extent *maxext)
 {
 	u64 block = gfs2_rbm_to_block(rbm);
 	u32 extlen = 1;
 	u64 nblock;
-	int ret;
 
 	/*
 	 * If we have a minimum extent length, then skip over any extent
 	 * which is less than the min extent length in size.
 	 */
-	if (minext) {
+	if (minext > 1) {
 		extlen = gfs2_free_extlen(rbm, minext);
 		if (extlen <= maxext->len)
 			goto fail;
 
@@ -1655,7 +1705,7 @@ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
 	 * Check the extent which has been found against the reservations
 	 * and skip if parts of it are already reserved
 	 */
-	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
+	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, rs);
 	if (nblock == block) {
 		if (!minext || extlen >= minext)
 			return 0;
 
@@ -1664,12 +1714,15 @@ static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
 			maxext->len = extlen;
 			maxext->rbm = *rbm;
 		}
-fail:
-		nblock = block + extlen;
+	} else {
+		u64 len = nblock - block;
+		if (len >= (u64)1 << 32)
+			return -E2BIG;
+		extlen = len;
 	}
-	ret = gfs2_rbm_from_block(rbm, nblock);
-	if (ret < 0)
-		return ret;
+fail:
+	if (gfs2_rbm_add(rbm, extlen))
+		return -E2BIG;
 	return 1;
 }
 
@@ -1677,9 +1730,9 @@ fail:
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
- * @minext: Pointer to the requested extent length (NULL for a single block)
+ * @minext: Pointer to the requested extent length
 *          This is updated to be the actual reservation size.
- * @ip: If set, check for reservations
+ * @rs: Our own reservation (NULL to skip checking for reservations)
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 
@@ -1693,7 +1746,7 @@ fail:
 */
 
 static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
-			 const struct gfs2_inode *ip, bool nowrap)
+			 struct gfs2_blkreserv *rs, bool nowrap)
 {
 	bool scan_from_start = rbm->bii == 0 && rbm->offset == 0;
 	struct buffer_head *bh;
 
@@ -1714,8 +1767,7 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
 
 	while(1) {
 		bi = rbm_bi(rbm);
-		if ((ip == NULL || !gfs2_rs_active(&ip->i_res)) &&
-		    test_bit(GBF_FULL, &bi->bi_flags) &&
+		if (test_bit(GBF_FULL, &bi->bi_flags) &&
 		    (state == GFS2_BLKST_FREE))
 			goto next_bitmap;
 
@@ -1731,11 +1783,10 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
 			goto next_bitmap;
 		}
 		rbm->offset = offset;
-		if (ip == NULL)
+		if (!rs)
 			return 0;
 
-		ret = gfs2_reservation_check_and_update(rbm, ip,
-							minext ? *minext : 0,
+		ret = gfs2_reservation_check_and_update(rbm, rs, *minext,
 							&maxext);
 		if (ret == 0)
 			return 0;
 
@@ -1767,7 +1818,7 @@ next_iter:
 			break;
 	}
 
-	if (minext == NULL || state != GFS2_BLKST_FREE)
+	if (state != GFS2_BLKST_FREE)
 		return -ENOSPC;
 
 	/* If the extent was too small, and it's smaller than the smallest
 
@@ -1775,7 +1826,7 @@ next_iter:
 	   useless to search this rgrp again for this amount or more. */
 	if (wrapped && (scan_from_start || rbm->bii > last_bii) &&
 	    *minext < rbm->rgd->rd_extfail_pt)
-		rbm->rgd->rd_extfail_pt = *minext;
+		rbm->rgd->rd_extfail_pt = *minext - 1;
 
 	/* If the maximum extent we found is big enough to fulfill the
 	   minimum requirements, use it anyway. */
 
@@ -1938,7 +1989,7 @@ static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
 	u64 tdiff;
 
 	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
-			    rs->rs_rbm.rgd->rd_gl->gl_dstamp));
+			    rs->rs_rgd->rd_gl->gl_dstamp));
 
 	return tdiff > (msecs * 1000 * 1000);
 }
 
@@ -1993,8 +2044,7 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
 * We try our best to find an rgrp that has at least ap->target blocks
 * available. After a couple of passes (loops == 2), the prospects of finding
 * such an rgrp diminish. At this stage, we return the first rgrp that has
- * at least ap->min_target blocks available. Either way, we set ap->allowed to
- * the number of blocks available in the chosen rgrp.
+ * at least ap->min_target blocks available.
 *
 * Returns: 0 on success,
 *          -ENOMEM if a suitable rgrp can't be found
 
@@ -2006,56 +2056,64 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrpd *begin = NULL;
 	struct gfs2_blkreserv *rs = &ip->i_res;
-	int error = 0, rg_locked, flags = 0;
+	int error = 0, flags = LM_FLAG_NODE_SCOPE;
+	bool rg_locked;
 	u64 last_unlinked = NO_BLOCK;
+	u32 target = ap->target;
 	int loops = 0;
-	u32 free_blocks, skip = 0;
+	u32 free_blocks, blocks_available, skip = 0;
+
+	BUG_ON(rs->rs_reserved);
 
 	if (sdp->sd_args.ar_rgrplvb)
 		flags |= GL_SKIP;
-	if (gfs2_assert_warn(sdp, ap->target))
+	if (gfs2_assert_warn(sdp, target))
 		return -EINVAL;
 	if (gfs2_rs_active(rs)) {
-		begin = rs->rs_rbm.rgd;
-	} else if (rs->rs_rbm.rgd &&
-		   rgrp_contains_block(rs->rs_rbm.rgd, ip->i_goal)) {
-		begin = rs->rs_rbm.rgd;
+		begin = rs->rs_rgd;
+	} else if (rs->rs_rgd &&
+		   rgrp_contains_block(rs->rs_rgd, ip->i_goal)) {
+		begin = rs->rs_rgd;
 	} else {
 		check_and_update_goal(ip);
-		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
+		rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
 	}
 	if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
 		skip = gfs2_orlov_skip(ip);
-	if (rs->rs_rbm.rgd == NULL)
+	if (rs->rs_rgd == NULL)
 		return -EBADSLT;
 
 	while (loops < 3) {
-		rg_locked = 1;
+		struct gfs2_rgrpd *rgd;
 
-		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
-			rg_locked = 0;
+		rg_locked = gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl);
+		if (rg_locked) {
+			rgrp_lock_local(rs->rs_rgd);
+		} else {
 			if (skip && skip--)
 				goto next_rgrp;
 			if (!gfs2_rs_active(rs)) {
 				if (loops == 0 &&
-				    !fast_to_acquire(rs->rs_rbm.rgd))
+				    !fast_to_acquire(rs->rs_rgd))
 					goto next_rgrp;
 				if ((loops < 2) &&
 				    gfs2_rgrp_used_recently(rs, 1000) &&
-				    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
+				    gfs2_rgrp_congested(rs->rs_rgd, loops))
 					goto next_rgrp;
 			}
-			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
+			error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl,
 						   LM_ST_EXCLUSIVE, flags,
 						   &ip->i_rgd_gh);
 			if (unlikely(error))
 				return error;
+			rgrp_lock_local(rs->rs_rgd);
 			if (!gfs2_rs_active(rs) && (loops < 2) &&
-			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
+			    gfs2_rgrp_congested(rs->rs_rgd, loops))
 				goto skip_rgrp;
 			if (sdp->sd_args.ar_rgrplvb) {
-				error = update_rgrp_lvb(rs->rs_rbm.rgd);
+				error = update_rgrp_lvb(rs->rs_rgd);
 				if (unlikely(error)) {
+					rgrp_unlock_local(rs->rs_rgd);
 					gfs2_glock_dq_uninit(&ip->i_rgd_gh);
 					return error;
 				}
 
@@ -2063,36 +2121,46 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
 		}
 
 		/* Skip unusable resource groups */
-		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
+		if ((rs->rs_rgd->rd_flags & (GFS2_RGF_NOALLOC |
 						 GFS2_RDF_ERROR)) ||
-		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
+		    (loops == 0 && target > rs->rs_rgd->rd_extfail_pt))
 			goto skip_rgrp;
 
 		if (sdp->sd_args.ar_rgrplvb)
-			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
+			gfs2_rgrp_bh_get(rs->rs_rgd);
 
 		/* Get a reservation if we don't already have one */
 		if (!gfs2_rs_active(rs))
-			rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
+			rg_mblk_search(rs->rs_rgd, ip, ap);
 
 		/* Skip rgrps when we can't get a reservation on first pass */
 		if (!gfs2_rs_active(rs) && (loops < 1))
 			goto check_rgrp;
 
 		/* If rgrp has enough free space, use it */
-		free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
-		if (free_blocks >= ap->target ||
-		    (loops == 2 && ap->min_target &&
-		     free_blocks >= ap->min_target)) {
-			ap->allowed = free_blocks;
-			return 0;
+		rgd = rs->rs_rgd;
+		spin_lock(&rgd->rd_rsspin);
+		free_blocks = rgd_free(rgd, rs);
+		blocks_available = rgd->rd_free_clone - rgd->rd_reserved;
+		if (free_blocks < target || blocks_available < target) {
+			spin_unlock(&rgd->rd_rsspin);
+			goto check_rgrp;
 		}
+		rs->rs_reserved = ap->target;
+		if (rs->rs_reserved > blocks_available)
+			rs->rs_reserved = blocks_available;
+		rgd->rd_reserved += rs->rs_reserved;
+		spin_unlock(&rgd->rd_rsspin);
+		rgrp_unlock_local(rs->rs_rgd);
+		return 0;
 check_rgrp:
 		/* Check for unlinked inodes which can be reclaimed */
-		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
-			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
+		if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK)
+			try_rgrp_unlink(rs->rs_rgd, &last_unlinked,
 					ip->i_no_addr);
 skip_rgrp:
+		rgrp_unlock_local(rs->rs_rgd);
+
 		/* Drop reservation, if we couldn't use reserved rgrp */
 		if (gfs2_rs_active(rs))
 			gfs2_rs_deltree(rs);
 
@@ -2102,7 +2170,7 @@ skip_rgrp:
 			gfs2_glock_dq_uninit(&ip->i_rgd_gh);
 next_rgrp:
 		/* Find the next rgrp, and continue looking */
-		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
+		if (gfs2_select_rgrp(&rs->rs_rgd, begin))
 			continue;
 		if (skip)
 			continue;
 
@@ -2119,9 +2187,12 @@ next_rgrp:
 				return error;
 		}
 		/* Flushing the log may release space */
-		if (loops == 2)
+		if (loops == 2) {
+			if (ap->min_target)
+				target = ap->min_target;
 			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 				       GFS2_LFC_INPLACE_RESERVE);
+		}
 	}
 
 	return -ENOSPC;
 
@@ -2136,6 +2207,17 @@ next_rgrp:
 
 void gfs2_inplace_release(struct gfs2_inode *ip)
 {
+	struct gfs2_blkreserv *rs = &ip->i_res;
+
+	if (rs->rs_reserved) {
+		struct gfs2_rgrpd *rgd = rs->rs_rgd;
+
+		spin_lock(&rgd->rd_rsspin);
+		BUG_ON(rgd->rd_reserved < rs->rs_reserved);
+		rgd->rd_reserved -= rs->rs_reserved;
+		spin_unlock(&rgd->rd_rsspin);
+		rs->rs_reserved = 0;
+	}
 	if (gfs2_holder_initialized(&ip->i_rgd_gh))
 		gfs2_glock_dq_uninit(&ip->i_rgd_gh);
 }
 
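gfs2_inplace_reserve() above now claims up to ap->target blocks under rd_rsspin, relaxing the target to ap->min_target only before the final pass after a log flush may have freed space, and gfs2_inplace_release() hands back whatever remains unclaimed. A standalone model of that retry policy (try_rgrp() stands in for the per-rgrp checks and is not a real gfs2 function):

#include <stdbool.h>

struct alloc_parms_model {
        unsigned int target;     /* preferred allocation size */
        unsigned int min_target; /* acceptable fallback size */
};

extern bool try_rgrp(int rgrp, unsigned int target); /* hypothetical */

static int reserve_model(const struct alloc_parms_model *ap, int nr_rgrps)
{
        unsigned int target = ap->target;
        int loops, i;

        for (loops = 0; loops < 3; loops++) {
                for (i = 0; i < nr_rgrps; i++)
                        if (try_rgrp(i, target))
                                return 0;
                /* before the last pass, relax the target (the kernel
                 * also flushes the log here, which may free space) */
                if (loops == 1 && ap->min_target)
                        target = ap->min_target;
        }
        return -1; /* no space; gfs2 returns -ENOSPC */
}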
@@ -2205,7 +2287,7 @@ static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
 			bi_prev = bi;
 		}
 		gfs2_setbit(&rbm, false, new_state);
-		gfs2_rbm_incr(&rbm);
+		gfs2_rbm_add(&rbm, 1);
 	}
 }
 
@@ -2223,11 +2305,12 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
 	struct gfs2_blkreserv *trs;
 	const struct rb_node *n;
 
-	gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
+	spin_lock(&rgd->rd_rsspin);
+	gfs2_print_dbg(seq, "%s R: n:%llu f:%02x b:%u/%u i:%u q:%u r:%u e:%u\n",
 		       fs_id_buf,
 		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
 		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
-		       rgd->rd_reserved, rgd->rd_extfail_pt);
+		       rgd->rd_requested, rgd->rd_reserved, rgd->rd_extfail_pt);
 	if (rgd->rd_sbd->sd_args.ar_rgrplvb) {
 		struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
 
@@ -2236,7 +2319,6 @@ void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
 			       be32_to_cpu(rgl->rl_free),
 			       be32_to_cpu(rgl->rl_dinodes));
 	}
-	spin_lock(&rgd->rd_rsspin);
 	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
 		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
 		dump_rs(seq, trs, fs_id_buf);
 
@@ -2273,29 +2355,29 @@ static void gfs2_adjust_reservation(struct gfs2_inode *ip,
 {
 	struct gfs2_blkreserv *rs = &ip->i_res;
 	struct gfs2_rgrpd *rgd = rbm->rgd;
-	unsigned rlen;
-	u64 block;
-	int ret;
 
-	spin_lock(&rgd->rd_rsspin);
+	BUG_ON(rs->rs_reserved < len);
+	rs->rs_reserved -= len;
 	if (gfs2_rs_active(rs)) {
-		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
-			block = gfs2_rbm_to_block(rbm);
-			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
-			rlen = min(rs->rs_free, len);
-			rs->rs_free -= rlen;
-			rgd->rd_reserved -= rlen;
+		u64 start = gfs2_rbm_to_block(rbm);
+
+		if (rs->rs_start == start) {
+			unsigned int rlen;
+
+			rs->rs_start += len;
+			rlen = min(rs->rs_requested, len);
+			rs->rs_requested -= rlen;
+			rgd->rd_requested -= rlen;
 			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
-			if (rs->rs_free && !ret)
-				goto out;
+			if (rs->rs_start < rgd->rd_data0 + rgd->rd_data &&
+			    rs->rs_requested)
+				return;
 			/* We used up our block reservation, so we should
 			   reserve more blocks next time. */
 			atomic_add(RGRP_RSRV_ADDBLKS, &ip->i_sizehint);
 		}
 		__rs_deltree(rs);
 	}
-out:
-	spin_unlock(&rgd->rd_rsspin);
 }
 
/**
 
@@ -2315,15 +2397,13 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
 	u64 goal;
 
 	if (gfs2_rs_active(&ip->i_res)) {
-		*rbm = ip->i_res.rs_rbm;
-		return;
+		goal = ip->i_res.rs_start;
+	} else {
+		if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
+			goal = ip->i_goal;
+		else
+			goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
 	}
 
-	if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
-		goal = ip->i_goal;
-	else
-		goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
-
 	if (WARN_ON_ONCE(gfs2_rbm_from_block(rbm, goal))) {
 		rbm->bii = 0;
 		rbm->offset = 0;
 
@@ -2346,17 +2426,21 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct buffer_head *dibh;
-	struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rbm.rgd, };
-	unsigned int ndata;
+	struct gfs2_rbm rbm = { .rgd = ip->i_res.rs_rgd, };
 	u64 block; /* block, within the file system scope */
-	int error;
+	u32 minext = 1;
+	int error = -ENOSPC;
+
+	BUG_ON(ip->i_res.rs_reserved < *nblocks);
 
-	gfs2_set_alloc_start(&rbm, ip, dinode);
-	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);
+	rgrp_lock_local(rbm.rgd);
+	if (gfs2_rs_active(&ip->i_res)) {
+		gfs2_set_alloc_start(&rbm, ip, dinode);
+		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, &ip->i_res, false);
+	}
 	if (error == -ENOSPC) {
 		gfs2_set_alloc_start(&rbm, ip, dinode);
-		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
+		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &minext, NULL, false);
 	}
 
 	/* Since all blocks are reserved in advance, this shouldn't happen */
 
@@ -2371,14 +2455,8 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
 	gfs2_alloc_extent(&rbm, dinode, nblocks);
 	block = gfs2_rbm_to_block(&rbm);
 	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
-	if (gfs2_rs_active(&ip->i_res))
-		gfs2_adjust_reservation(ip, &rbm, *nblocks);
-	ndata = *nblocks;
-	if (dinode)
-		ndata--;
-
 	if (!dinode) {
-		ip->i_goal = block + ndata - 1;
+		ip->i_goal = block + *nblocks - 1;
 		error = gfs2_meta_inode_buffer(ip, &dibh);
 		if (error == 0) {
 			struct gfs2_dinode *di =
 
@@ -2389,12 +2467,20 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
 			brelse(dibh);
 		}
 	}
-	if (rbm.rgd->rd_free < *nblocks) {
+	spin_lock(&rbm.rgd->rd_rsspin);
+	gfs2_adjust_reservation(ip, &rbm, *nblocks);
+	if (rbm.rgd->rd_free < *nblocks || rbm.rgd->rd_reserved < *nblocks) {
 		fs_warn(sdp, "nblocks=%u\n", *nblocks);
+		spin_unlock(&rbm.rgd->rd_rsspin);
 		goto rgrp_error;
 	}
-
+	BUG_ON(rbm.rgd->rd_reserved < *nblocks);
+	BUG_ON(rbm.rgd->rd_free_clone < *nblocks);
+	BUG_ON(rbm.rgd->rd_free < *nblocks);
+	rbm.rgd->rd_reserved -= *nblocks;
+	rbm.rgd->rd_free_clone -= *nblocks;
 	rbm.rgd->rd_free -= *nblocks;
+	spin_unlock(&rbm.rgd->rd_rsspin);
 	if (dinode) {
 		rbm.rgd->rd_dinodes++;
 		*generation = rbm.rgd->rd_igeneration++;
 
@@ -2404,6 +2490,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
 
 	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
 	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
+	rgrp_unlock_local(rbm.rgd);
 
 	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
 	if (dinode)
 
@@ -2411,13 +2498,13 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
 
 	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
 
-	rbm.rgd->rd_free_clone -= *nblocks;
 	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
 			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
 	*bn = block;
 	return 0;
 
 rgrp_error:
+	rgrp_unlock_local(rbm.rgd);
 	gfs2_rgrp_error(rbm.rgd);
 	return -EIO;
 }
 
@@ -2437,12 +2524,14 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
+	rgrp_lock_local(rgd);
 	rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
 	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
 	rgd->rd_free += blen;
 	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+	rgrp_unlock_local(rgd);
 
 	/* Directories keep their data in the metadata address space */
 	if (meta || ip->i_depth || gfs2_is_jdata(ip))
 
@@ -2478,17 +2567,20 @@ void gfs2_unlink_di(struct inode *inode)
 	rgd = gfs2_blk2rgrpd(sdp, blkno, true);
 	if (!rgd)
 		return;
+	rgrp_lock_local(rgd);
 	rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
 	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
 	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, 1);
+	rgrp_unlock_local(rgd);
 }
 
 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
 {
 	struct gfs2_sbd *sdp = rgd->rd_sbd;
 
+	rgrp_lock_local(rgd);
 	rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
 	if (!rgd->rd_dinodes)
 		gfs2_consist_rgrpd(rgd);
 
@@ -2497,6 +2589,7 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
 
 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+	rgrp_unlock_local(rgd);
 	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
 
 	gfs2_statfs_change(sdp, 0, +1, -1);
 
@@ -2511,6 +2604,10 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
+ * The inode glock of @no_addr must be held.  The @type to check for is either
+ * GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED; checking for type GFS2_BLKST_FREE
+ * or GFS2_BLKST_USED would make no sense.
+ *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 
@@ -2534,6 +2631,13 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
 	rbm.rgd = rgd;
 	error = gfs2_rbm_from_block(&rbm, no_addr);
 	if (!WARN_ON_ONCE(error)) {
+		/*
+		 * No need to take the local resource group lock here; the
+		 * inode glock of @no_addr provides the necessary
+		 * synchronization in case the block is an inode.  (In case
+		 * the block is not an inode, the block type will not match
+		 * the @type we are looking for.)
+		 */
 		if (gfs2_testbit(&rbm, false) != type)
 			error = -ESTALE;
 	}
 
@@ -2578,7 +2682,7 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
 			return;
 		rgd = gfs2_blk2rgrpd(sdp, block, 1);
 	} else {
-		rgd = ip->i_res.rs_rbm.rgd;
+		rgd = ip->i_res.rs_rgd;
 		if (!rgd || !rgrp_contains_block(rgd, block))
 			rgd = gfs2_blk2rgrpd(sdp, block, 1);
 	}
 
@@ -2633,9 +2737,8 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
 				      sizeof(struct gfs2_holder),
 				      GFP_NOFS | __GFP_NOFAIL);
 	for (x = 0; x < rlist->rl_rgrps; x++)
-		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
-				 LM_ST_EXCLUSIVE, 0,
-				 &rlist->rl_ghs[x]);
+		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, LM_ST_EXCLUSIVE,
+				 LM_FLAG_NODE_SCOPE, &rlist->rl_ghs[x]);
 }
 
 /**
 
@@ -2658,3 +2761,14 @@ void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
 	}
 }
 
+void rgrp_lock_local(struct gfs2_rgrpd *rgd)
+{
+	BUG_ON(!gfs2_glock_is_held_excl(rgd->rd_gl) &&
+	       !test_bit(SDF_NORECOVERY, &rgd->rd_sbd->sd_flags));
+	mutex_lock(&rgd->rd_mutex);
+}
+
+void rgrp_unlock_local(struct gfs2_rgrpd *rgd)
+{
+	mutex_unlock(&rgd->rd_mutex);
+}
 
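rgrp_lock_local()/rgrp_unlock_local() close out the file: with LM_FLAG_NODE_SCOPE, one exclusive glock can now be shared by every holder on a node, so a per-rgrp mutex takes over intra-node serialization of the bitmaps, and the BUG_ON documents that the cluster-wide lock must already be held. A toy two-level model using a pthread mutex for the local layer:

#include <pthread.h>

struct rgrp_lock_model {
        /* the cluster-wide DLM lock is elided; this model assumes the
         * node already holds it exclusively (node-scoped) */
        pthread_mutex_t local; /* stands in for rgd->rd_mutex */
};

static void rgrp_lock_local_model(struct rgrp_lock_model *rgd)
{
        pthread_mutex_lock(&rgd->local); /* serialize within this node */
}

static void rgrp_unlock_local_model(struct rgrp_lock_model *rgd)
{
        pthread_mutex_unlock(&rgd->local);
}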
@@ -77,7 +77,7 @@ extern int gfs2_fitrim(struct file *filp, void __user *argp);
 
 /* This is how to tell if a reservation is in the rgrp tree: */
 static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
 {
-	return rs && !RB_EMPTY_NODE(&rs->rs_node);
+	return !RB_EMPTY_NODE(&rs->rs_node);
 }
 
 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
 
@@ -88,4 +88,8 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
 }
 
 extern void check_and_update_goal(struct gfs2_inode *ip);
+
+extern void rgrp_lock_local(struct gfs2_rgrpd *rgd);
+extern void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
+
 #endif /* __RGRP_DOT_H__ */
 
@@ -81,19 +81,12 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
 {
 	struct gfs2_jdesc *jd;
-	int found = 0;
 
 	list_for_each_entry(jd, head, jd_list) {
-		if (jd->jd_jid == jid) {
-			found = 1;
-			break;
-		}
+		if (jd->jd_jid == jid)
+			return jd;
 	}
 
-	if (!found)
-		jd = NULL;
-
-	return jd;
+	return NULL;
 }
 
 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
 
@@ -165,7 +158,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 {
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 	struct gfs2_glock *j_gl = ip->i_gl;
-	struct gfs2_holder freeze_gh;
 	struct gfs2_log_header_host head;
 	int error;
 
@@ -173,12 +165,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 	if (error)
 		return error;
 
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-				   LM_FLAG_NOEXP | GL_EXACT,
-				   &freeze_gh);
-	if (error)
-		goto fail_threads;
-
 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 	if (gfs2_withdrawn(sdp)) {
 		error = -EIO;
 
@@ -205,13 +191,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 
 	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 
-	gfs2_glock_dq_uninit(&freeze_gh);
-
 	return 0;
 
 fail:
-	gfs2_glock_dq_uninit(&freeze_gh);
-fail_threads:
 	if (sdp->sd_quotad_process)
 		kthread_stop(sdp->sd_quotad_process);
 	sdp->sd_quotad_process = NULL;
 
@@ -452,7 +434,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 	}
 
 	if (error)
-		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
 
 out:
 	while (!list_empty(&list)) {
 
@@ -607,30 +589,9 @@ out:
 
 int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 {
-	struct gfs2_holder freeze_gh;
 	int error = 0;
 	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 
-	gfs2_holder_mark_uninitialized(&freeze_gh);
-	if (sdp->sd_freeze_gl &&
-	    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
-		if (!log_write_allowed) {
-			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-						   LM_ST_SHARED, LM_FLAG_TRY |
-						   LM_FLAG_NOEXP | GL_EXACT,
-						   &freeze_gh);
-			if (error == GLR_TRYFAILED)
-				error = 0;
-		} else {
-			error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-						   LM_ST_SHARED,
-						   LM_FLAG_NOEXP | GL_EXACT,
-						   &freeze_gh);
-			if (error && !gfs2_withdrawn(sdp))
-				return error;
-		}
-	}
-
 	gfs2_flush_delete_work(sdp);
 	if (!log_write_allowed && current == sdp->sd_quotad_process)
 		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
 
@@ -650,18 +611,15 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 
 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
 			       GFS2_LFC_MAKE_FS_RO);
-		wait_event(sdp->sd_reserving_log_wait,
-			   atomic_read(&sdp->sd_reserving_log) == 0);
-		gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
-				 sdp->sd_jdesc->jd_blocks);
+		wait_event_timeout(sdp->sd_log_waitq,
+				   gfs2_log_is_empty(sdp),
+				   HZ * 5);
+		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
 	} else {
-		wait_event_timeout(sdp->sd_reserving_log_wait,
-				   atomic_read(&sdp->sd_reserving_log) == 0,
+		wait_event_timeout(sdp->sd_log_waitq,
+				   gfs2_log_is_empty(sdp),
 				   HZ * 5);
 	}
-	if (gfs2_holder_initialized(&freeze_gh))
-		gfs2_glock_dq_uninit(&freeze_gh);
-
 	gfs2_quota_cleanup(sdp);
 
 	if (!log_write_allowed)
 
@@ -770,10 +728,8 @@ void gfs2_freeze_func(struct work_struct *work)
 	struct super_block *sb = sdp->sd_vfs;
 
 	atomic_inc(&sb->s_active);
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-				   LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
+	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
 	if (error) {
 		fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
 		gfs2_assert_withdraw(sdp, 0);
 	} else {
 		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 
@@ -783,7 +739,7 @@ void gfs2_freeze_func(struct work_struct *work)
 				error);
 			gfs2_assert_withdraw(sdp, 0);
 		}
-		gfs2_glock_dq_uninit(&freeze_gh);
+		gfs2_freeze_unlock(&freeze_gh);
 	}
 	deactivate_super(sb);
 	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
 
@@ -851,7 +807,7 @@ static int gfs2_unfreeze(struct super_block *sb)
 		return 0;
 	}
 
-	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
 	mutex_unlock(&sdp->sd_freeze_mutex);
 	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
 }
 
@@ -1227,7 +1183,8 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
 		goto out_qs;
 	}
 
-	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+				   LM_FLAG_NODE_SCOPE, &gh);
 	if (error)
 		goto out_qs;
 
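gfs2_make_fs_ro() now waits on gfs2_log_is_empty() instead of the old sd_reserving_log counter. That predicate is not shown in this excerpt; judging from the assertion it replaces, it plausibly reduces to comparing the free log blocks against the journal size, as in this sketch (the real helper lives in fs/gfs2/log.h and may differ):

static inline bool gfs2_log_is_empty_sketch(struct gfs2_sbd *sdp)
{
	/* the log is empty when every journal block is free again */
	return atomic_read(&sdp->sd_log_blks_free) ==
	       sdp->sd_jdesc->jd_blocks;
}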
@@ -11,6 +11,10 @@
 #include <linux/dcache.h>
 #include "incore.h"
 
+/* Supported fs format version range */
+#define GFS2_FS_FORMAT_MIN (1801)
+#define GFS2_FS_FORMAT_MAX (1802)
+
 extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
 
 static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
 
@@ -54,7 +58,9 @@ extern struct file_system_type gfs2meta_fs_type;
 extern const struct export_operations gfs2_export_ops;
 extern const struct super_operations gfs2_super_ops;
 extern const struct dentry_operations gfs2_dops;
-extern const struct xattr_handler *gfs2_xattr_handlers[];
+
+extern const struct xattr_handler *gfs2_xattr_handlers_max[];
+extern const struct xattr_handler **gfs2_xattr_handlers_min;
 
 #endif /* __SUPER_DOT_H__ */
 
@@ -560,6 +560,7 @@ TRACE_EVENT(gfs2_block_alloc,
 		__field(	u8,	block_state		)
 		__field(	u64,	rd_addr			)
 		__field(	u32,	rd_free_clone		)
+		__field(	u32,	rd_requested		)
 		__field(	u32,	rd_reserved		)
 	),
 
@@ -571,17 +572,20 @@ TRACE_EVENT(gfs2_block_alloc,
 		__entry->block_state	= block_state;
 		__entry->rd_addr	= rgd->rd_addr;
 		__entry->rd_free_clone	= rgd->rd_free_clone;
+		__entry->rd_requested	= rgd->rd_requested;
 		__entry->rd_reserved	= rgd->rd_reserved;
 	),
 
-	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rr:%lu",
+	TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rq:%u rr:%u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long long)__entry->inum,
 		  (unsigned long long)__entry->start,
 		  (unsigned long)__entry->len,
 		  block_state_name(__entry->block_state),
 		  (unsigned long long)__entry->rd_addr,
-		  __entry->rd_free_clone, (unsigned long)__entry->rd_reserved)
+		  __entry->rd_free_clone,
+		  __entry->rd_requested,
+		  __entry->rd_reserved)
 );
 
 /* Keep track of multi-block reservations as they are allocated/freed */
 
@@ -595,33 +599,40 @@ TRACE_EVENT(gfs2_rs,
 		__field(	dev_t,	dev			)
 		__field(	u64,	rd_addr			)
 		__field(	u32,	rd_free_clone		)
+		__field(	u32,	rd_requested		)
 		__field(	u32,	rd_reserved		)
 		__field(	u64,	inum			)
 		__field(	u64,	start			)
-		__field(	u32,	free			)
+		__field(	u32,	requested		)
+		__field(	u32,	reserved		)
 		__field(	u8,	func			)
 	),
 
 	TP_fast_assign(
-		__entry->dev		= rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev;
-		__entry->rd_addr	= rs->rs_rbm.rgd->rd_addr;
-		__entry->rd_free_clone	= rs->rs_rbm.rgd->rd_free_clone;
-		__entry->rd_reserved	= rs->rs_rbm.rgd->rd_reserved;
+		__entry->dev		= rs->rs_rgd->rd_sbd->sd_vfs->s_dev;
+		__entry->rd_addr	= rs->rs_rgd->rd_addr;
+		__entry->rd_free_clone	= rs->rs_rgd->rd_free_clone;
+		__entry->rd_requested	= rs->rs_rgd->rd_requested;
+		__entry->rd_reserved	= rs->rs_rgd->rd_reserved;
 		__entry->inum		= container_of(rs, struct gfs2_inode,
 						       i_res)->i_no_addr;
-		__entry->start		= gfs2_rbm_to_block(&rs->rs_rbm);
-		__entry->free		= rs->rs_free;
+		__entry->start		= rs->rs_start;
+		__entry->requested	= rs->rs_requested;
+		__entry->reserved	= rs->rs_reserved;
 		__entry->func		= func;
 	),
 
-	TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu",
+	TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%u rq:%u rr:%u %s q:%u r:%u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  (unsigned long long)__entry->inum,
 		  (unsigned long long)__entry->start,
 		  (unsigned long long)__entry->rd_addr,
-		  (unsigned long)__entry->rd_free_clone,
-		  (unsigned long)__entry->rd_reserved,
-		  rs_func_name(__entry->func), (unsigned long)__entry->free)
+		  __entry->rd_free_clone,
+		  __entry->rd_requested,
+		  __entry->rd_reserved,
+		  rs_func_name(__entry->func),
+		  __entry->requested,
+		  __entry->reserved)
 );
 
 #endif /* _TRACE_GFS2_H */
 
fs/gfs2/trans.c (102 lines changed)
@@ -31,17 +31,17 @@ static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
 	fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
 		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
 		test_bit(TR_TOUCHED, &tr->tr_flags));
-	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
+	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
 		tr->tr_num_buf_new, tr->tr_num_buf_rm,
 		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
-		tr->tr_num_revoke, tr->tr_num_revoke_rm);
+		tr->tr_num_revoke);
 }

-int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
-		     unsigned int revokes)
+int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+		       unsigned int blocks, unsigned int revokes,
+		       unsigned long ip)
 {
-	struct gfs2_trans *tr;
-	int error;
+	unsigned int extra_revokes;

 	if (current->journal_info) {
 		gfs2_print_trans(sdp, current->journal_info);
@@ -52,39 +52,72 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
 	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
 		return -EROFS;

-	tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
-	if (!tr)
-		return -ENOMEM;
-
-	tr->tr_ip = _RET_IP_;
+	tr->tr_ip = ip;
 	tr->tr_blocks = blocks;
 	tr->tr_revokes = revokes;
-	tr->tr_reserved = 1;
-	set_bit(TR_ALLOCED, &tr->tr_flags);
-	if (blocks)
-		tr->tr_reserved += 6 + blocks;
-	if (revokes)
-		tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
+	tr->tr_reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
+	if (blocks) {
+		/*
+		 * The reserved blocks are either used for data or metadata.
+		 * We can have mixed data and metadata, each with its own log
+		 * descriptor block; see calc_reserved().
+		 */
+		tr->tr_reserved += blocks + 1 + DIV_ROUND_UP(blocks - 1, databuf_limit(sdp));
+	}
 	INIT_LIST_HEAD(&tr->tr_databuf);
 	INIT_LIST_HEAD(&tr->tr_buf);
 	INIT_LIST_HEAD(&tr->tr_list);
 	INIT_LIST_HEAD(&tr->tr_ail1_list);
 	INIT_LIST_HEAD(&tr->tr_ail2_list);

+	if (gfs2_assert_warn(sdp, tr->tr_reserved <= sdp->sd_jdesc->jd_blocks))
+		return -EINVAL;
+
 	sb_start_intwrite(sdp->sd_vfs);

-	error = gfs2_log_reserve(sdp, tr->tr_reserved);
-	if (error)
-		goto fail;
+	/*
+	 * Try the reservations under sd_log_flush_lock to prevent log flushes
+	 * from creating inconsistencies between the number of allocated and
+	 * reserved revokes.  If that fails, do a full-block allocation outside
+	 * of the lock to avoid stalling log flushes.  Then, allot the
+	 * appropriate number of blocks to revokes, use as many revokes locally
+	 * as needed, and "release" the surplus into the revokes pool.
+	 */
+
+	down_read(&sdp->sd_log_flush_lock);
+	if (gfs2_log_try_reserve(sdp, tr, &extra_revokes))
+		goto reserved;
+	up_read(&sdp->sd_log_flush_lock);
+	gfs2_log_reserve(sdp, tr, &extra_revokes);
+	down_read(&sdp->sd_log_flush_lock);
+
+reserved:
+	gfs2_log_release_revokes(sdp, extra_revokes);
+	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+		gfs2_log_release_revokes(sdp, tr->tr_revokes);
+		up_read(&sdp->sd_log_flush_lock);
+		gfs2_log_release(sdp, tr->tr_reserved);
+		sb_end_intwrite(sdp->sd_vfs);
+		return -EROFS;
+	}

 	current->journal_info = tr;

 	return 0;
+}

-fail:
-	sb_end_intwrite(sdp->sd_vfs);
-	kmem_cache_free(gfs2_trans_cachep, tr);
+int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+		     unsigned int revokes)
+{
+	struct gfs2_trans *tr;
+	int error;

+	tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
+	if (!tr)
+		return -ENOMEM;
+	error = __gfs2_trans_begin(tr, sdp, blocks, revokes, _RET_IP_);
+	if (error)
+		kmem_cache_free(gfs2_trans_cachep, tr);
 	return error;
 }
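
Note: to make the split above concrete, __gfs2_trans_begin() now takes a
caller-provided transaction, which permits stack-allocated transactions; the
TR_ONSTACK flag (tested in gfs2_trans_end() below) tells the teardown path not
to free them. A minimal sketch of such a caller follows; the function name is
invented, and this mirrors rather than reproduces the in-tree users:

static int example_revoke_only_trans(struct gfs2_sbd *sdp,
				     unsigned int revokes)
{
	struct gfs2_trans tr;
	int error;

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);	/* don't gfs2_trans_free() this */
	error = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (error)
		return error;

	/* ... queue up to 'revokes' revokes against the transaction ... */

	gfs2_trans_end(sdp);
	return 0;
}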
@@ -92,37 +125,39 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
 {
 	struct gfs2_trans *tr = current->journal_info;
 	s64 nbuf;
-	int alloced = test_bit(TR_ALLOCED, &tr->tr_flags);

 	current->journal_info = NULL;

 	if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
+		gfs2_log_release_revokes(sdp, tr->tr_revokes);
+		up_read(&sdp->sd_log_flush_lock);
 		gfs2_log_release(sdp, tr->tr_reserved);
-		if (alloced) {
+		if (!test_bit(TR_ONSTACK, &tr->tr_flags))
 			gfs2_trans_free(sdp, tr);
-			sb_end_intwrite(sdp->sd_vfs);
-		}
+		sb_end_intwrite(sdp->sd_vfs);
 		return;
 	}

+	gfs2_log_release_revokes(sdp, tr->tr_revokes - tr->tr_num_revoke);
+
 	nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
 	nbuf -= tr->tr_num_buf_rm;
 	nbuf -= tr->tr_num_databuf_rm;

-	if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) &&
-				       (tr->tr_num_revoke <= tr->tr_revokes)))
+	if (gfs2_assert_withdraw(sdp, nbuf <= tr->tr_blocks) ||
+	    gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes))
 		gfs2_print_trans(sdp, tr);

 	gfs2_log_commit(sdp, tr);
-	if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags))
+	if (!test_bit(TR_ONSTACK, &tr->tr_flags) &&
+	    !test_bit(TR_ATTACHED, &tr->tr_flags))
 		gfs2_trans_free(sdp, tr);
 	up_read(&sdp->sd_log_flush_lock);

 	if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 			       GFS2_LFC_TRANS_END);
-	if (alloced)
-		sb_end_intwrite(sdp->sd_vfs);
+	sb_end_intwrite(sdp->sd_vfs);
 }

 static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
@@ -262,7 +297,6 @@ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
 void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
 {
 	struct gfs2_bufdata *bd, *tmp;
-	struct gfs2_trans *tr = current->journal_info;
 	unsigned int n = len;

 	gfs2_log_lock(sdp);
@@ -274,7 +308,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
 			if (bd->bd_gl)
 				gfs2_glock_remove_revoke(bd->bd_gl);
 			kmem_cache_free(gfs2_bufdata_cachep, bd);
-			tr->tr_num_revoke_rm++;
+			gfs2_log_release_revokes(sdp, 1);
 			if (--n == 0)
 				break;
 		}

--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -27,13 +27,16 @@ struct gfs2_glock;
  * block, or all of the blocks in the rg, whichever is smaller */
 static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested)
 {
-	struct gfs2_rgrpd *rgd = ip->i_res.rs_rbm.rgd;
+	struct gfs2_rgrpd *rgd = ip->i_res.rs_rgd;

 	if (requested < rgd->rd_length)
 		return requested + 1;
 	return rgd->rd_length;
 }

+extern int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+			      unsigned int blocks, unsigned int revokes,
+			      unsigned long ip);
 extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
 			    unsigned int revokes);

--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -91,12 +91,39 @@ out_unlock:
 	return error;
 }

+/**
+ * gfs2_freeze_lock - hold the freeze glock
+ * @sdp: the superblock
+ * @freeze_gh: pointer to the requested holder
+ * @caller_flags: any additional flags needed by the caller
+ */
+int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
+		     int caller_flags)
+{
+	int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
+	int error;
+
+	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
+				   freeze_gh);
+	if (error && error != GLR_TRYFAILED)
+		fs_err(sdp, "can't lock the freeze lock: %d\n", error);
+	return error;
+}
+
+void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
+{
+	if (gfs2_holder_initialized(freeze_gh))
+		gfs2_glock_dq_uninit(freeze_gh);
+}
+
 static void signal_our_withdraw(struct gfs2_sbd *sdp)
 {
-	struct gfs2_glock *gl = sdp->sd_live_gh.gh_gl;
+	struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
 	struct inode *inode = sdp->sd_jdesc->jd_inode;
 	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_glock *i_gl = ip->i_gl;
 	u64 no_formal_ino = ip->i_no_formal_ino;
 	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 	int ret = 0;
 	int tries;
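
Note: the two helpers above give the freeze and withdraw paths one shared way
to take and drop the freeze glock, and gfs2_freeze_unlock() deliberately
tolerates a holder that was never acquired. A hypothetical caller (the name is
invented for illustration) would use them as follows:

static int example_protect_from_freeze(struct gfs2_sbd *sdp)
{
	struct gfs2_holder freeze_gh;
	int error;

	gfs2_holder_mark_uninitialized(&freeze_gh);
	error = gfs2_freeze_lock(sdp, &freeze_gh, LM_FLAG_TRY);
	if (error && error != GLR_TRYFAILED)
		return error;

	/* ... work that must not race with a filesystem freeze ... */

	gfs2_freeze_unlock(&freeze_gh);	/* no-op if the try-lock failed */
	return 0;
}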
@@ -117,8 +144,21 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
 	 * therefore we need to clear SDF_JOURNAL_LIVE manually.
 	 */
 	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
-	if (!sb_rdonly(sdp->sd_vfs))
-		ret = gfs2_make_fs_ro(sdp);
+	if (!sb_rdonly(sdp->sd_vfs)) {
+		struct gfs2_holder freeze_gh;
+
+		gfs2_holder_mark_uninitialized(&freeze_gh);
+		if (sdp->sd_freeze_gl &&
+		    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
+			ret = gfs2_freeze_lock(sdp, &freeze_gh,
+				       log_write_allowed ? 0 : LM_FLAG_TRY);
+			if (ret == GLR_TRYFAILED)
+				ret = 0;
+		}
+		if (!ret)
+			ret = gfs2_make_fs_ro(sdp);
+		gfs2_freeze_unlock(&freeze_gh);
+	}

 	if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
 		if (!ret)
@@ -141,7 +181,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
 		atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
 		thaw_super(sdp->sd_vfs);
 	} else {
-		wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
+		wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
+			    TASK_UNINTERRUPTIBLE);
 	}

 	/*
@@ -161,15 +202,15 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
 	 * on other nodes to be successful, otherwise we remain the owner of
 	 * the glock as far as dlm is concerned.
 	 */
-	if (gl->gl_ops->go_free) {
-		set_bit(GLF_FREEING, &gl->gl_flags);
-		wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
+	if (i_gl->gl_ops->go_free) {
+		set_bit(GLF_FREEING, &i_gl->gl_flags);
+		wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
 	}

 	/*
 	 * Dequeue the "live" glock, but keep a reference so it's never freed.
 	 */
-	gfs2_glock_hold(gl);
+	gfs2_glock_hold(live_gl);
 	gfs2_glock_dq_wait(&sdp->sd_live_gh);
 	/*
 	 * We enqueue the "live" glock in EX so that all other nodes
@@ -208,7 +249,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
 		gfs2_glock_nq(&sdp->sd_live_gh);
 	}

-	gfs2_glock_queue_put(gl); /* drop the extra reference we acquired */
+	gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */
 	clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);

 	/*

--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -149,6 +149,9 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,

 extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 			       bool verbose);
+extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
+			    struct gfs2_holder *freeze_gh, int caller_flags);
+extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);

 #define gfs2_io_error(sdp) \
 gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)

--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -70,6 +70,20 @@ static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
 	return 0;
 }

+static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type)
+{
+	switch(sdp->sd_sb.sb_fs_format) {
+	case GFS2_FS_FORMAT_MAX:
+		return true;
+
+	case GFS2_FS_FORMAT_MIN:
+		return type <= GFS2_EATYPE_SECURITY;
+
+	default:
+		return false;
+	}
+}
+
 typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
 			  struct gfs2_ea_header *ea,
 			  struct gfs2_ea_header *prev, void *private);
@@ -77,6 +91,7 @@ typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
 static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
 			ea_call_t ea_call, void *data)
 {
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_ea_header *ea, *prev = NULL;
 	int error = 0;

@@ -89,9 +104,8 @@ static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
 		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
 						  bh->b_data + bh->b_size))
 			goto fail;
-		if (!GFS2_EATYPE_VALID(ea->ea_type))
+		if (!gfs2_eatype_valid(sdp, ea->ea_type))
 			goto fail;
-
 		error = ea_call(ip, bh, ea, prev, data);
 		if (error)
 			return error;
@@ -259,7 +273,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
 		return -EIO;
 	}

-	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
+	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+				   LM_FLAG_NODE_SCOPE, &rg_gh);
 	if (error)
 		return error;
@@ -344,6 +359,7 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
 		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
 		     void *private)
 {
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct ea_list *ei = private;
 	struct gfs2_ea_request *er = ei->ei_er;
 	unsigned int ea_size;
@@ -353,6 +369,8 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
 	if (ea->ea_type == GFS2_EATYPE_UNUSED)
 		return 0;

+	BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY &&
+	       sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN);
 	switch (ea->ea_type) {
 	case GFS2_EATYPE_USR:
 		prefix = "user.";
@@ -366,8 +384,12 @@ static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
 		prefix = "security.";
 		l = 9;
 		break;
+	case GFS2_EATYPE_TRUSTED:
+		prefix = "trusted.";
+		l = 8;
+		break;
 	default:
-		BUG();
+		return 0;
 	}

 	ea_size = l + ea->ea_name_len + 1;
@@ -1386,7 +1408,8 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
 		return -EIO;
 	}

-	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
+	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
+				   LM_FLAG_NODE_SCOPE, &gh);
 	if (error)
 		return error;
@@ -1464,7 +1487,25 @@ static const struct xattr_handler gfs2_xattr_security_handler = {
 	.set = gfs2_xattr_set,
 };

-const struct xattr_handler *gfs2_xattr_handlers[] = {
+static bool
+gfs2_xattr_trusted_list(struct dentry *dentry)
+{
+	return capable(CAP_SYS_ADMIN);
+}
+
+static const struct xattr_handler gfs2_xattr_trusted_handler = {
+	.prefix = XATTR_TRUSTED_PREFIX,
+	.flags = GFS2_EATYPE_TRUSTED,
+	.list = gfs2_xattr_trusted_list,
+	.get = gfs2_xattr_get,
+	.set = gfs2_xattr_set,
+};
+
+const struct xattr_handler *gfs2_xattr_handlers_max[] = {
+	/* GFS2_FS_FORMAT_MAX */
+	&gfs2_xattr_trusted_handler,
+
+	/* GFS2_FS_FORMAT_MIN */
 	&gfs2_xattr_user_handler,
 	&gfs2_xattr_security_handler,
 	&posix_acl_access_xattr_handler,
@@ -1472,3 +1513,4 @@ const struct xattr_handler *gfs2_xattr_handlers[] = {
 	NULL,
 };

+const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
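
Note: the table layout above is deliberate: gfs2_xattr_handlers_max lists the
trusted handler first, and gfs2_xattr_handlers_min (defined just above) points
one entry past it, so both on-disk formats share a single table. A sketch of
how a mount path can pick the matching table follows; the function name is
invented for illustration, and the real selection happens once the
superblock's sb_fs_format is known:

static void example_select_xattr_handlers(struct super_block *sb,
					  struct gfs2_sbd *sdp)
{
	if (sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MAX)
		sb->s_xattr = gfs2_xattr_handlers_max;
	else
		sb->s_xattr = gfs2_xattr_handlers_min;
}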

--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -47,7 +47,7 @@
 #define GFS2_FORMAT_DE		1200
 #define GFS2_FORMAT_QU		1500
 /* These are part of the superblock */
-#define GFS2_FORMAT_FS		1801
+#define GFS2_FORMAT_FS		1802
 #define GFS2_FORMAT_MULTI	1900

 /*
@@ -389,8 +389,9 @@ struct gfs2_leaf {
 #define GFS2_EATYPE_USR		1
 #define GFS2_EATYPE_SYS		2
 #define GFS2_EATYPE_SECURITY	3
+#define GFS2_EATYPE_TRUSTED	4

-#define GFS2_EATYPE_LAST	3
+#define GFS2_EATYPE_LAST	4
 #define GFS2_EATYPE_VALID(x)	((x) <= GFS2_EATYPE_LAST)

 #define GFS2_EAFLAG_LAST	0x01	/* last ea in block */