gfs2: allow quota_check and inplace_reserve to return available blocks

struct gfs2_alloc_parms is passed to gfs2_quota_check() and
gfs2_inplace_reserve() with ap->target containing the number of
blocks being requested for allocation in the current operation.

This patch adds a new field, 'allowed', to struct gfs2_alloc_parms.
gfs2_quota_check() and gfs2_inplace_reserve() use it to return the
maximum number of blocks allowed by quota and by the chosen rgrp,
respectively.

A second new field, 'min_target', when non-zero, tells
gfs2_quota_check() and gfs2_inplace_reserve() not to return
-EDQUOT/-ENOSPC as long as at least 'min_target' blocks are
allowable/available. The assumption is that the caller can make do
with 'min_target' blocks and will likely proceed to allocate them.

Signed-off-by: Abhi Das <adas@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
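
For illustration only (not part of this patch), a caller that can settle
for a smaller allocation might use the new fields roughly as follows.
The helper name and the trimming policy are hypothetical; only the
gfs2_alloc_parms fields and the two gfs2 calls come from this patch:

	/* Hypothetical caller sketch -- not code from this commit. */
	static int reserve_at_least(struct gfs2_inode *ip, u64 wanted,
				    u32 minimum, u64 *granted)
	{
		struct gfs2_alloc_parms ap = { .target = wanted,
					       .min_target = minimum };
		u64 max_blks = wanted;
		int error;

		/* With min_target set, the quota check only fails if even
		 * 'minimum' blocks would break the hard limit. */
		error = gfs2_quota_check(ip, ip->i_inode.i_uid,
					 ip->i_inode.i_gid, &ap);
		if (error)
			return error;
		if (ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* On its final pass, inplace_reserve may settle for an rgrp
		 * holding only 'minimum' free blocks. */
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			return error;
		if (ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* The caller is expected to trim its allocation to *granted. */
		*granted = max_blks;
		return 0;
	}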
Author:    Abhi Das, 2015-03-18 12:04:37 -05:00
Committer: Bob Peterson
Parent:    b8fbf471ed
Commit:    25435e5ed6
4 changed files with 58 additions and 19 deletions

fs/gfs2/incore.h

@@ -302,7 +302,9 @@ struct gfs2_blkreserv {
  */
 struct gfs2_alloc_parms {
 	u64 target;
+	u32 min_target;
 	u32 aflags;
+	u64 allowed;
 };
 
 enum {

fs/gfs2/quota.c

@@ -1094,15 +1094,33 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
 	return 0;
 }
 
+/**
+ * gfs2_quota_check - check if allocating new blocks will exceed quota
+ * @ip:  The inode for which this check is being performed
+ * @uid: The uid to check against
+ * @gid: The gid to check against
+ * @ap:  The allocation parameters. ap->target contains the requested
+ *       blocks. ap->min_target, if set, contains the minimum blks
+ *       requested.
+ *
+ * Returns: 0 on success.
+ *                  min_req = ap->min_target ? ap->min_target : ap->target;
+ *                  quota must allow at least min_req blks for success and
+ *                  ap->allowed is set to the number of blocks allowed
+ *
+ *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
+ *                  of blocks available.
+ */
 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
 		     struct gfs2_alloc_parms *ap)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_quota_data *qd;
-	s64 value;
+	s64 value, warn, limit;
 	unsigned int x;
 	int error = 0;
 
+	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
 	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
 		return 0;
 
@@ -1116,29 +1134,37 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
 		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
 			continue;
 
+		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
+		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
 		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
 		spin_lock(&qd_lock);
-		value += qd->qd_change + ap->target;
+		value += qd->qd_change;
 		spin_unlock(&qd_lock);
 
-		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
-			print_message(qd, "exceeded");
-			quota_send_warning(qd->qd_id,
-					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
-
-			error = -EDQUOT;
-			break;
-		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
-			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
+		if (limit > 0 && (limit - value) < ap->allowed)
+			ap->allowed = limit - value;
+		/* If we can't meet the target */
+		if (limit && limit < (value + (s64)ap->target)) {
+			/* If no min_target specified or we don't meet
+			 * min_target, return -EDQUOT */
+			if (!ap->min_target || ap->min_target > ap->allowed) {
+				print_message(qd, "exceeded");
+				quota_send_warning(qd->qd_id,
+						   sdp->sd_vfs->s_dev,
+						   QUOTA_NL_BHARDWARN);
+				error = -EDQUOT;
+				break;
+			}
+		} else if (warn && warn < value &&
 			   time_after_eq(jiffies, qd->qd_last_warn +
-					 gfs2_tune_get(sdp,
-						gt_quota_warn_period) * HZ)) {
+					 gfs2_tune_get(sdp, gt_quota_warn_period)
+					 * HZ)) {
 			quota_send_warning(qd->qd_id,
 					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
 			error = print_message(qd, "warning");
 			qd->qd_last_warn = jiffies;
 		}
 	}
 
 	return error;
 }
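
As a reading aid (not part of the patch), the per-quota-id decision above
can be modeled as a small standalone function; the struct and names below
are simplified stand-ins, not the gfs2 API:

	/* Simplified userspace model of the quota decision -- illustration
	 * only. */
	#include <stdbool.h>
	#include <stdint.h>

	struct quota_state {
		int64_t limit;	/* hard limit in blocks; 0 means "no limit" */
		int64_t value;	/* blocks already charged to this uid/gid */
	};

	/* Decide whether a request for 'target' blocks (optionally shrinkable
	 * to 'min_target') violates quota; '*allowed' reports the headroom. */
	bool over_quota(const struct quota_state *q, uint64_t target,
			uint32_t min_target, uint64_t *allowed)
	{
		*allowed = UINT64_MAX;		/* no limit => plenty */
		if (q->limit > 0)
			*allowed = q->value < q->limit ?
				   (uint64_t)(q->limit - q->value) : 0;

		if (q->limit && q->value + (int64_t)target > q->limit) {
			/* The full target does not fit; fail unless the
			 * caller can live with min_target and that fits. */
			if (!min_target || min_target > *allowed)
				return true;
		}
		return false;
	}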

fs/gfs2/rgrp.c

@@ -1946,10 +1946,18 @@ static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
  * @ip: the inode to reserve space for
  * @ap: the allocation parameters
  *
- * Returns: errno
+ * We try our best to find an rgrp that has at least ap->target blocks
+ * available. After a couple of passes (loops == 2), the prospects of finding
+ * such an rgrp diminish. At this stage, we return the first rgrp that has
+ * at least ap->min_target blocks available. Either way, we set ap->allowed to
+ * the number of blocks available in the chosen rgrp.
+ *
+ * Returns: 0 on success,
+ *          -ENOMEM if a suitable rgrp can't be found
+ *          errno otherwise
  */
 
-int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrpd *begin = NULL;
@@ -2012,7 +2020,7 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
 		/* Skip unuseable resource groups */
 		if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
 						 GFS2_RDF_ERROR)) ||
-		    (ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
+		    (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
 			goto skip_rgrp;
 
 		if (sdp->sd_args.ar_rgrplvb)
@@ -2027,11 +2035,13 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
 			goto check_rgrp;
 
 		/* If rgrp has enough free space, use it */
-		if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
+		if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
+		    (loops == 2 && ap->min_target &&
+		     rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
 			ip->i_rgd = rs->rs_rbm.rgd;
+			ap->allowed = ip->i_rgd->rd_free_clone;
 			return 0;
 		}
-
 check_rgrp:
 		/* Check for unlinked inodes which can be reclaimed */
 		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
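
The rgrp acceptance test above can likewise be read as a small policy
function. The sketch below is illustrative only; the struct and function
are made up, and only the loops/min_target policy mirrors the patch:

	/* Illustrative model of the rgrp acceptance test -- not gfs2 code. */
	#include <stdbool.h>
	#include <stdint.h>

	struct rgrp_model {
		uint64_t free_clone;	/* free, usable blocks in this rgrp */
		uint64_t extfail_pt;	/* extent size known to have failed */
		bool noalloc_or_error;	/* GFS2_RGF_NOALLOC / GFS2_RDF_ERROR */
	};

	/* 'loops' counts full passes over the rgrp list (0, 1, 2). */
	bool rgrp_acceptable(const struct rgrp_model *rgd, int loops,
			     uint64_t target, uint32_t min_target)
	{
		/* Only the first pass skips rgrps whose failed-extent point
		 * suggests the full target will not fit; later passes try
		 * them anyway. */
		if (rgd->noalloc_or_error ||
		    (loops == 0 && target > rgd->extfail_pt))
			return false;

		/* Normally require the full target; on the last pass accept
		 * an rgrp covering at least min_target, if one was given. */
		return rgd->free_clone >= target ||
		       (loops == 2 && min_target &&
			rgd->free_clone >= min_target);
	}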

fs/gfs2/rgrp.h

@@ -41,7 +41,8 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
 extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
 
 #define GFS2_AF_ORLOV 1
 
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip,
+				struct gfs2_alloc_parms *ap);
 extern void gfs2_inplace_release(struct gfs2_inode *ip);
 extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,