ext4: add flag to ext4_has_free_blocks
This patch adds an allocation request flag to the ext4_has_free_blocks function, which enables the use of reserved blocks. This will allow a punch hole to proceed even if the disk is full. Punching a hole may require additional blocks to first split the extents. Because ext4_has_free_blocks is a low-level function, the flag needs to be passed down through several functions, listed below: ext4_ext_insert_extent → ext4_ext_create_new_leaf → ext4_ext_grow_indepth → ext4_ext_split → ext4_ext_new_meta_block → ext4_mb_new_blocks → ext4_claim_free_blocks → ext4_has_free_blocks. [ext4 punch hole patch series 1/5 v7] Signed-off-by: Allison Henderson <achender@us.ibm.com> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> Reviewed-by: Mingming Cao <cmm@us.ibm.com>
This commit is contained in:
parent
ae81230686
commit
55f020db66
|
@ -369,7 +369,8 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
|
|||
* Check if filesystem has nblocks free & available for allocation.
|
||||
* On success return 1, return 0 on failure.
|
||||
*/
|
||||
static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
|
||||
static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
|
||||
s64 nblocks, unsigned int flags)
|
||||
{
|
||||
s64 free_blocks, dirty_blocks, root_blocks;
|
||||
struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
|
||||
|
@ -393,7 +394,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
|
|||
/* Hm, nope. Are (enough) root reserved blocks available? */
|
||||
if (sbi->s_resuid == current_fsuid() ||
|
||||
((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
|
||||
capable(CAP_SYS_RESOURCE)) {
|
||||
capable(CAP_SYS_RESOURCE) ||
|
||||
(flags & EXT4_MB_USE_ROOT_BLOCKS)) {
|
||||
|
||||
if (free_blocks >= (nblocks + dirty_blocks))
|
||||
return 1;
|
||||
}
|
||||
|
@ -402,9 +405,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
|
|||
}
|
||||
|
||||
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
|
||||
s64 nblocks)
|
||||
s64 nblocks, unsigned int flags)
|
||||
{
|
||||
if (ext4_has_free_blocks(sbi, nblocks)) {
|
||||
if (ext4_has_free_blocks(sbi, nblocks, flags)) {
|
||||
percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
|
||||
return 0;
|
||||
} else
|
||||
|
@ -425,7 +428,7 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
|
|||
*/
|
||||
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
|
||||
{
|
||||
if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
|
||||
if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) ||
|
||||
(*retries)++ > 3 ||
|
||||
!EXT4_SB(sb)->s_journal)
|
||||
return 0;
|
||||
|
@ -448,7 +451,8 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
|
|||
* error stores in errp pointer
|
||||
*/
|
||||
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
|
||||
ext4_fsblk_t goal, unsigned long *count, int *errp)
|
||||
ext4_fsblk_t goal, unsigned int flags,
|
||||
unsigned long *count, int *errp)
|
||||
{
|
||||
struct ext4_allocation_request ar;
|
||||
ext4_fsblk_t ret;
|
||||
|
@ -458,6 +462,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
|
|||
ar.inode = inode;
|
||||
ar.goal = goal;
|
||||
ar.len = count ? *count : 1;
|
||||
ar.flags = flags;
|
||||
|
||||
ret = ext4_mb_new_blocks(handle, &ar, errp);
|
||||
if (count)
|
||||
|
|
|
@ -108,7 +108,8 @@ typedef unsigned int ext4_group_t;
|
|||
#define EXT4_MB_DELALLOC_RESERVED 0x0400
|
||||
/* We are doing stream allocation */
|
||||
#define EXT4_MB_STREAM_ALLOC 0x0800
|
||||
|
||||
/* Use reserved root blocks if needed */
|
||||
#define EXT4_MB_USE_ROOT_BLOCKS 0x1000
|
||||
|
||||
struct ext4_allocation_request {
|
||||
/* target inode for block we're allocating */
|
||||
|
@ -514,6 +515,8 @@ struct ext4_new_group_data {
|
|||
/* Convert extent to initialized after IO complete */
|
||||
#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
|
||||
EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
|
||||
/* Punch out blocks of an extent */
|
||||
#define EXT4_GET_BLOCKS_PUNCH_OUT_EXT 0x0020
|
||||
|
||||
/*
|
||||
* Flags used by ext4_free_blocks
|
||||
|
@ -1718,8 +1721,12 @@ extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group);
|
|||
extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
|
||||
ext4_group_t group);
|
||||
extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
|
||||
ext4_fsblk_t goal, unsigned long *count, int *errp);
|
||||
extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
|
||||
ext4_fsblk_t goal,
|
||||
unsigned int flags,
|
||||
unsigned long *count,
|
||||
int *errp);
|
||||
extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
|
||||
s64 nblocks, unsigned int flags);
|
||||
extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
|
||||
extern void ext4_check_blocks_bitmap(struct super_block *);
|
||||
extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
|
||||
|
|
|
@ -192,12 +192,13 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
|
|||
static ext4_fsblk_t
|
||||
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
|
||||
struct ext4_ext_path *path,
|
||||
struct ext4_extent *ex, int *err)
|
||||
struct ext4_extent *ex, int *err, unsigned int flags)
|
||||
{
|
||||
ext4_fsblk_t goal, newblock;
|
||||
|
||||
goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
|
||||
newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
|
||||
newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
|
||||
NULL, err);
|
||||
return newblock;
|
||||
}
|
||||
|
||||
|
@ -792,8 +793,9 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
|
|||
* - initializes subtree
|
||||
*/
|
||||
static int ext4_ext_split(handle_t *handle, struct inode *inode,
|
||||
struct ext4_ext_path *path,
|
||||
struct ext4_extent *newext, int at)
|
||||
unsigned int flags,
|
||||
struct ext4_ext_path *path,
|
||||
struct ext4_extent *newext, int at)
|
||||
{
|
||||
struct buffer_head *bh = NULL;
|
||||
int depth = ext_depth(inode);
|
||||
|
@ -847,7 +849,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
|
|||
ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
|
||||
for (a = 0; a < depth - at; a++) {
|
||||
newblock = ext4_ext_new_meta_block(handle, inode, path,
|
||||
newext, &err);
|
||||
newext, &err, flags);
|
||||
if (newblock == 0)
|
||||
goto cleanup;
|
||||
ablocks[a] = newblock;
|
||||
|
@ -1056,8 +1058,9 @@ cleanup:
|
|||
* just created block
|
||||
*/
|
||||
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
|
||||
struct ext4_ext_path *path,
|
||||
struct ext4_extent *newext)
|
||||
unsigned int flags,
|
||||
struct ext4_ext_path *path,
|
||||
struct ext4_extent *newext)
|
||||
{
|
||||
struct ext4_ext_path *curp = path;
|
||||
struct ext4_extent_header *neh;
|
||||
|
@ -1065,7 +1068,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
|
|||
ext4_fsblk_t newblock;
|
||||
int err = 0;
|
||||
|
||||
newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
|
||||
newblock = ext4_ext_new_meta_block(handle, inode, path,
|
||||
newext, &err, flags);
|
||||
if (newblock == 0)
|
||||
return err;
|
||||
|
||||
|
@ -1140,8 +1144,9 @@ out:
|
|||
* if no free index is found, then it requests in-depth growing.
|
||||
*/
|
||||
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
|
||||
struct ext4_ext_path *path,
|
||||
struct ext4_extent *newext)
|
||||
unsigned int flags,
|
||||
struct ext4_ext_path *path,
|
||||
struct ext4_extent *newext)
|
||||
{
|
||||
struct ext4_ext_path *curp;
|
||||
int depth, i, err = 0;
|
||||
|
@ -1161,7 +1166,7 @@ repeat:
|
|||
if (EXT_HAS_FREE_INDEX(curp)) {
|
||||
/* if we found index with free entry, then use that
|
||||
* entry: create all needed subtree and add new leaf */
|
||||
err = ext4_ext_split(handle, inode, path, newext, i);
|
||||
err = ext4_ext_split(handle, inode, flags, path, newext, i);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
|
@ -1174,7 +1179,8 @@ repeat:
|
|||
err = PTR_ERR(path);
|
||||
} else {
|
||||
/* tree is full, time to grow in depth */
|
||||
err = ext4_ext_grow_indepth(handle, inode, path, newext);
|
||||
err = ext4_ext_grow_indepth(handle, inode, flags,
|
||||
path, newext);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
|
@ -1693,6 +1699,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
|
|||
int depth, len, err;
|
||||
ext4_lblk_t next;
|
||||
unsigned uninitialized = 0;
|
||||
int flags = 0;
|
||||
|
||||
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
|
||||
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
|
||||
|
@ -1767,7 +1774,9 @@ repeat:
|
|||
* There is no free space in the found leaf.
|
||||
* We're gonna add a new leaf in the tree.
|
||||
*/
|
||||
err = ext4_ext_create_new_leaf(handle, inode, path, newext);
|
||||
if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
|
||||
flags = EXT4_MB_USE_ROOT_BLOCKS;
|
||||
err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
|
||||
if (err)
|
||||
goto cleanup;
|
||||
depth = ext_depth(inode);
|
||||
|
|
|
@ -639,8 +639,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
|
|||
while (target > 0) {
|
||||
count = target;
|
||||
/* allocating blocks for indirect blocks and direct blocks */
|
||||
current_block = ext4_new_meta_blocks(handle, inode,
|
||||
goal, &count, err);
|
||||
current_block = ext4_new_meta_blocks(handle, inode, goal,
|
||||
0, &count, err);
|
||||
if (*err)
|
||||
goto failed_out;
|
||||
|
||||
|
@ -1930,7 +1930,7 @@ repeat:
|
|||
* We do still charge estimated metadata to the sb though;
|
||||
* we cannot afford to run out of free blocks.
|
||||
*/
|
||||
if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
|
||||
if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) {
|
||||
dquot_release_reservation_block(inode, 1);
|
||||
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
|
||||
yield();
|
||||
|
|
|
@ -4236,7 +4236,9 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
|
|||
* there is enough free blocks to do block allocation
|
||||
* and verify allocation doesn't exceed the quota limits.
|
||||
*/
|
||||
while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
|
||||
while (ar->len &&
|
||||
ext4_claim_free_blocks(sbi, ar->len, ar->flags)) {
|
||||
|
||||
/* let others to free the space */
|
||||
yield();
|
||||
ar->len = ar->len >> 1;
|
||||
|
@ -4246,9 +4248,15 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
|
|||
return 0;
|
||||
}
|
||||
reserv_blks = ar->len;
|
||||
while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
|
||||
ar->flags |= EXT4_MB_HINT_NOPREALLOC;
|
||||
ar->len--;
|
||||
if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
|
||||
dquot_alloc_block_nofail(ar->inode, ar->len);
|
||||
} else {
|
||||
while (ar->len &&
|
||||
dquot_alloc_block(ar->inode, ar->len)) {
|
||||
|
||||
ar->flags |= EXT4_MB_HINT_NOPREALLOC;
|
||||
ar->len--;
|
||||
}
|
||||
}
|
||||
inquota = ar->len;
|
||||
if (ar->len == 0) {
|
||||
|
|
|
@ -820,8 +820,8 @@ inserted:
|
|||
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
|
||||
goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
|
||||
|
||||
block = ext4_new_meta_blocks(handle, inode,
|
||||
goal, NULL, &error);
|
||||
block = ext4_new_meta_blocks(handle, inode, goal, 0,
|
||||
NULL, &error);
|
||||
if (error)
|
||||
goto cleanup;
|
||||
|
||||
|
|
Loading…
Reference in New Issue