Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: remove write-only variables from ext4_ordered_write_end
  ext4: unexport jbd2_journal_update_superblock
  ext4: Cleanup whitespace and other miscellaneous style issues
  ext4: improve ext4_fill_flex_info() a bit
  ext4: Cleanup the block reservation code path
  ext4: don't assume extents can't cross block groups when truncating
  ext4: Fix lack of credits BUG() when deleting a badly fragmented inode
  ext4: Fix ext4_ext_journal_restart()
  ext4: fix ext4_da_write_begin error path
  jbd2: don't abort if flushing file data failed
  ext4: don't read inode block if the buffer has a write error
  ext4: Don't allow lg prealloc list to be grow large.
  ext4: Convert the usage of NR_CPUS to nr_cpu_ids.
  ext4: Improve error handling in mballoc
  ext4: lock block groups when initializing
  ext4: sync up block and inode bitmap reading functions
  ext4: Allow read/only mounts with corrupted block group checksums
  ext4: Fix data corruption when writing to prealloc area
commit 8f616cd524
fs/ext4/acl.c
@@ -47,6 +47,7 @@ ext4_acl_from_disk(const void *value, size_t size)
 				goto fail;
 			acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag);
 			acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
+
 			switch (acl->a_entries[n].e_tag) {
 			case ACL_USER_OBJ:
 			case ACL_GROUP_OBJ:
@@ -103,8 +104,7 @@ ext4_acl_to_disk(const struct posix_acl *acl, size_t *size)
 		switch (acl->a_entries[n].e_tag) {
 		case ACL_USER:
 		case ACL_GROUP:
-			entry->e_id =
-				cpu_to_le32(acl->a_entries[n].e_id);
+			entry->e_id = cpu_to_le32(acl->a_entries[n].e_id);
 			e += sizeof(ext4_acl_entry);
 			break;
fs/ext4/balloc.c
@@ -314,25 +314,28 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
 	if (unlikely(!bh)) {
 		ext4_error(sb, __func__,
 			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %llu",
-			    (int)block_group, (unsigned long long)bitmap_blk);
+			    "block_group = %lu, block_bitmap = %llu",
+			    block_group, bitmap_blk);
 		return NULL;
 	}
 	if (bh_uptodate_or_lock(bh))
 		return bh;
+
+	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		ext4_init_block_bitmap(sb, bh, block_group, desc);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
+		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
 		return bh;
 	}
+	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
 	if (bh_submit_read(bh) < 0) {
 		put_bh(bh);
 		ext4_error(sb, __func__,
 			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %llu",
-			    (int)block_group, (unsigned long long)bitmap_blk);
+			    "block_group = %lu, block_bitmap = %llu",
+			    block_group, bitmap_blk);
 		return NULL;
 	}
 	ext4_valid_block_bitmap(sb, desc, block_group, bh);
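Note on the hunk above ('ext4: lock block groups when initializing'): without sb_bgl_lock(), two tasks can both see an EXT4_BG_BLOCK_UNINIT group whose bitmap buffer is not yet uptodate and race to initialize it. A minimal user-space model of the check/lock/re-check pattern; all names here are invented for illustration, and the pthread spinlock only loosely stands in for the kernel's per-group lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t group_lock;  /* stands in for sb_bgl_lock()     */
    static int bitmap_uptodate;            /* stands in for buffer_uptodate() */
    static int init_count;                 /* times the bitmap was built      */

    static void read_bitmap(void)
    {
        if (bitmap_uptodate)               /* fast path: already valid */
            return;
        pthread_spin_lock(&group_lock);
        if (!bitmap_uptodate) {            /* re-check under the lock */
            init_count++;                  /* "ext4_init_block_bitmap()" */
            bitmap_uptodate = 1;
        }
        pthread_spin_unlock(&group_lock);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        read_bitmap();
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];
        pthread_spin_init(&group_lock, PTHREAD_PROCESS_PRIVATE);
        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        printf("initialized %d time(s)\n", init_count);  /* always 1 */
        return 0;
    }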
fs/ext4/ext4.h
@@ -1044,7 +1044,6 @@ extern void ext4_mb_update_group_info(struct ext4_group_info *grp,
 
 
 /* inode.c */
-void ext4_da_release_space(struct inode *inode, int used, int to_free);
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
 		struct buffer_head *bh, ext4_fsblk_t blocknr);
 struct buffer_head *ext4_getblk(handle_t *, struct inode *,
fs/ext4/extents.c
@@ -99,7 +99,7 @@ static int ext4_ext_journal_restart(handle_t *handle, int needed)
 	if (handle->h_buffer_credits > needed)
 		return 0;
 	err = ext4_journal_extend(handle, needed);
-	if (err)
+	if (err <= 0)
 		return err;
 	return ext4_journal_restart(handle, needed);
 }
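For context on the one-character fix above: jbd2's journal-extend primitive returns 0 when the handle was extended in place, a positive value when the transaction cannot be extended (the caller must restart it), and a negative errno on failure. The old `if (err)` returned the positive "cannot extend" value to the caller without ever restarting. A hedged sketch of the convention, with stubbed journal calls (not the real jbd2 API):

    #include <stdio.h>

    /* jbd2 convention: 0 = extended, >0 = cannot extend, <0 = error */
    static int journal_extend_stub(int outcome) { return outcome; }
    static int journal_restart_stub(void)       { return 0; }

    static int journal_restart_if_needed(int extend_outcome)
    {
        int err = journal_extend_stub(extend_outcome);
        if (err <= 0)                   /* extended in place, or hard error */
            return err;
        return journal_restart_stub(); /* >0: must start a fresh handle */
    }

    int main(void)
    {
        printf("%d\n", journal_restart_if_needed(0));   /* 0: extended    */
        printf("%d\n", journal_restart_if_needed(1));   /* 0: restarted   */
        printf("%d\n", journal_restart_if_needed(-5));  /* -5: propagated */
        return 0;
    }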
@@ -1910,9 +1910,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
 		}
 
-		/* at present, extent can't cross block group: */
-		/* leaf + bitmap + group desc + sb + inode */
-		credits = 5;
+		/*
+		 * 3 for leaf, sb, and inode plus 2 (bmap and group
+		 * descriptor) for each block group; assume two block
+		 * groups plus ex_ee_len/blocks_per_block_group for
+		 * the worst case
+		 */
+		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
 		if (ex == EXT_FIRST_EXTENT(eh)) {
 			correct_index = 1;
 			credits += (ext_depth(inode)) + 1;
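The new credit formula above can be sanity-checked by hand: 3 credits for leaf + superblock + inode, plus 2 (bitmap and group descriptor) for each of an assumed two block groups, gives the baseline of 7, and every additional group the extent may cross adds 2 more. A runnable check (the group geometry value is illustrative):

    #include <stdio.h>

    /* worst-case journal credits to remove one extent, per the comment above */
    static unsigned credits_for_extent(unsigned ex_ee_len,
                                       unsigned blocks_per_group)
    {
        /* 3 (leaf + sb + inode) + 2 * 2 assumed groups = 7 baseline, then
         * 2 more credits per extra block group the extent can cross */
        return 7 + 2 * (ex_ee_len / blocks_per_group);
    }

    int main(void)
    {
        /* 32768 blocks per group is the common 4K-blocksize ext4 geometry */
        printf("%u\n", credits_for_extent(100, 32768));    /* 7  */
        printf("%u\n", credits_for_extent(100000, 32768)); /* 13 */
        return 0;
    }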
@@ -2323,7 +2327,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		unsigned int newdepth;
 		/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
 		if (allocated <= EXT4_EXT_ZERO_LEN) {
-			/* Mark first half uninitialized.
+			/*
+			 * iblock == ee_block is handled by the zerouout
+			 * at the beginning.
+			 * Mark first half uninitialized.
 			 * Mark second half initialized and zero out the
 			 * initialized extent
 			 */
@@ -2346,7 +2353,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 				ex->ee_len = orig_ex.ee_len;
 				ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 				ext4_ext_dirty(handle, inode, path + depth);
-				/* zeroed the full extent */
+				/* blocks available from iblock */
 				return allocated;
 
 			} else if (err)
@@ -2374,6 +2381,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 				err = PTR_ERR(path);
 				return err;
 			}
+			/* get the second half extent details */
 			ex = path[depth].p_ext;
 			err = ext4_ext_get_access(handle, inode,
 								path + depth);
@@ -2403,6 +2411,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 			ext4_ext_dirty(handle, inode, path + depth);
 			/* zeroed the full extent */
+			/* blocks available from iblock */
 			return allocated;
 
 		} else if (err)
@@ -2418,7 +2427,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			 */
 			orig_ex.ee_len = cpu_to_le16(ee_len -
 						ext4_ext_get_actual_len(ex3));
 			if (newdepth != depth) {
 				depth = newdepth;
 				ext4_ext_drop_refs(path);
 				path = ext4_ext_find_extent(inode, iblock, path);
@@ -2434,7 +2442,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 				err = ext4_ext_get_access(handle, inode, path + depth);
 				if (err)
 					goto out;
 			}
 
 			allocated = max_blocks;
 
 			/* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
@@ -2452,6 +2460,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 			ext4_ext_dirty(handle, inode, path + depth);
 			/* zero out the first half */
+			/* blocks available from iblock */
 			return allocated;
 		}
 	}
fs/ext4/ialloc.c
@@ -97,34 +97,44 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
  * Return buffer_head of bitmap on success or NULL.
  */
 static struct buffer_head *
-read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 {
 	struct ext4_group_desc *desc;
 	struct buffer_head *bh = NULL;
+	ext4_fsblk_t bitmap_blk;
 
 	desc = ext4_get_group_desc(sb, block_group, NULL);
 	if (!desc)
-		goto error_out;
-	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
-		bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc));
-		if (!buffer_uptodate(bh)) {
-			lock_buffer(bh);
-			if (!buffer_uptodate(bh)) {
-				ext4_init_inode_bitmap(sb, bh, block_group,
-							desc);
-				set_buffer_uptodate(bh);
-			}
-			unlock_buffer(bh);
-		}
-	} else {
-		bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
-	}
-	if (!bh)
-		ext4_error(sb, "read_inode_bitmap",
+		return NULL;
+	bitmap_blk = ext4_inode_bitmap(sb, desc);
+	bh = sb_getblk(sb, bitmap_blk);
+	if (unlikely(!bh)) {
+		ext4_error(sb, __func__,
 			    "Cannot read inode bitmap - "
 			    "block_group = %lu, inode_bitmap = %llu",
-			    block_group, ext4_inode_bitmap(sb, desc));
-error_out:
+			    block_group, bitmap_blk);
+		return NULL;
+	}
+	if (bh_uptodate_or_lock(bh))
+		return bh;
+
+	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+		ext4_init_inode_bitmap(sb, bh, block_group, desc);
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+		return bh;
+	}
+	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	if (bh_submit_read(bh) < 0) {
+		put_bh(bh);
+		ext4_error(sb, __func__,
+			    "Cannot read inode bitmap - "
+			    "block_group = %lu, inode_bitmap = %llu",
+			    block_group, bitmap_blk);
+		return NULL;
+	}
 	return bh;
 }
@@ -200,7 +210,7 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
 	}
 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
+	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 	if (!bitmap_bh)
 		goto error_return;
 
@@ -623,7 +633,7 @@ got_group:
 		goto fail;
 
 	brelse(bitmap_bh);
-	bitmap_bh = read_inode_bitmap(sb, group);
+	bitmap_bh = ext4_read_inode_bitmap(sb, group);
 	if (!bitmap_bh)
 		goto fail;
 
@@ -728,7 +738,7 @@ got:
 
 	/* When marking the block group with
 	 * ~EXT4_BG_INODE_UNINIT we don't want to depend
-	 * on the value of bg_itable_unsed even though
+	 * on the value of bg_itable_unused even though
 	 * mke2fs could have initialized the same for us.
 	 * Instead we calculated the value below
 	 */
@@ -891,7 +901,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
 
 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
+	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 	if (!bitmap_bh) {
 		ext4_warning(sb, __func__,
 			     "inode bitmap error for orphan %lu", ino);
@@ -969,7 +979,7 @@ unsigned long ext4_count_free_inodes (struct super_block * sb)
 			continue;
 		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
 		brelse(bitmap_bh);
-		bitmap_bh = read_inode_bitmap(sb, i);
+		bitmap_bh = ext4_read_inode_bitmap(sb, i);
 		if (!bitmap_bh)
 			continue;
 
fs/ext4/inode.c
@@ -191,6 +191,7 @@ static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
 void ext4_delete_inode (struct inode * inode)
 {
 	handle_t *handle;
+	int err;
 
 	if (ext4_should_order_data(inode))
 		ext4_begin_ordered_truncate(inode, 0);
@@ -199,8 +200,9 @@ void ext4_delete_inode (struct inode * inode)
 	if (is_bad_inode(inode))
 		goto no_delete;
 
-	handle = start_transaction(inode);
+	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
 	if (IS_ERR(handle)) {
 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
 		/*
 		 * If we're going to skip the normal cleanup, we still need to
 		 * make sure that the in-core orphan linked list is properly
@@ -213,8 +215,34 @@ void ext4_delete_inode (struct inode * inode)
 	if (IS_SYNC(inode))
 		handle->h_sync = 1;
 	inode->i_size = 0;
+	err = ext4_mark_inode_dirty(handle, inode);
+	if (err) {
+		ext4_warning(inode->i_sb, __func__,
+			     "couldn't mark inode dirty (err %d)", err);
+		goto stop_handle;
+	}
 	if (inode->i_blocks)
 		ext4_truncate(inode);
+
+	/*
+	 * ext4_ext_truncate() doesn't reserve any slop when it
+	 * restarts journal transactions; therefore there may not be
+	 * enough credits left in the handle to remove the inode from
+	 * the orphan list and set the dtime field.
+	 */
+	if (handle->h_buffer_credits < 3) {
+		err = ext4_journal_extend(handle, 3);
+		if (err > 0)
+			err = ext4_journal_restart(handle, 3);
+		if (err != 0) {
+			ext4_warning(inode->i_sb, __func__,
+				     "couldn't extend journal (err %d)", err);
+		stop_handle:
+			ext4_journal_stop(handle);
+			goto no_delete;
+		}
+	}
+
 	/*
 	 * Kill off the orphan record which ext4_truncate created.
 	 * AKPM: I think this can be inside the above `if'.
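The slop logic added above guards against ext4_ext_truncate() consuming the handle: if fewer than 3 credits remain for the orphan-list removal and dtime update, try to extend, and fall back to restarting the transaction when extend reports it cannot. A compact user-space sketch of that ensure-credits pattern; the handle struct and journal calls below are stand-ins, not the real jbd2 API:

    #include <stdio.h>

    struct handle { int credits; };

    /* stubs modeling jbd2: 0 = extended, >0 = cannot extend, <0 = error */
    static int journal_extend(struct handle *h, int n)  { h->credits += n; return 0; }
    static int journal_restart(struct handle *h, int n) { h->credits = n;  return 0; }

    /* make sure at least `need` credits remain on the handle */
    static int ensure_credits(struct handle *h, int need)
    {
        int err;

        if (h->credits >= need)
            return 0;
        err = journal_extend(h, need);
        if (err > 0)                    /* could not extend: restart instead */
            err = journal_restart(h, need);
        return err;                     /* 0 on success, negative on failure */
    }

    int main(void)
    {
        struct handle h = { .credits = 1 };
        printf("%d, credits now %d\n", ensure_credits(&h, 3), h.credits);
        return 0;
    }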
@@ -952,6 +980,67 @@ out:
 	return err;
 }
 
+/*
+ * Calculate the number of metadata blocks need to reserve
+ * to allocate @blocks for non extent file based file
+ */
+static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
+{
+	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+	int ind_blks, dind_blks, tind_blks;
+
+	/* number of new indirect blocks needed */
+	ind_blks = (blocks + icap - 1) / icap;
+
+	dind_blks = (ind_blks + icap - 1) / icap;
+
+	tind_blks = 1;
+
+	return ind_blks + dind_blks + tind_blks;
+}
+
+/*
+ * Calculate the number of metadata blocks need to reserve
+ * to allocate given number of blocks
+ */
+static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
+{
+	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+		return ext4_ext_calc_metadata_amount(inode, blocks);
+
+	return ext4_indirect_calc_metadata_amount(inode, blocks);
+}
+
+static void ext4_da_update_reserve_space(struct inode *inode, int used)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	int total, mdb, mdb_free;
+
+	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+	/* recalculate the number of metablocks still need to be reserved */
+	total = EXT4_I(inode)->i_reserved_data_blocks - used;
+	mdb = ext4_calc_metadata_amount(inode, total);
+
+	/* figure out how many metablocks to release */
+	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
+	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
+
+	/* Account for allocated meta_blocks */
+	mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
+
+	/* update fs free blocks counter for truncate case */
+	percpu_counter_add(&sbi->s_freeblocks_counter, mdb_free);
+
+	/* update per-inode reservations */
+	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
+	EXT4_I(inode)->i_reserved_data_blocks -= used;
+
+	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
+	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
+	EXT4_I(inode)->i_allocated_meta_blocks = 0;
+	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+}
+
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 /*
|
|||
|
||||
|
||||
/*
|
||||
* The ext4_get_blocks_wrap() function try to look up the requested blocks,
|
||||
* and returns if the blocks are already mapped.
|
||||
*
|
||||
*
|
||||
* ext4_ext4 get_block() wrapper function
|
||||
* It will do a look up first, and returns if the blocks already mapped.
|
||||
* Otherwise it takes the write lock of the i_data_sem and allocate blocks
|
||||
* and store the allocated blocks in the result buffer head and mark it
|
||||
* mapped.
|
||||
|
@ -1069,7 +1157,7 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
|
|||
* which were deferred till now
|
||||
*/
|
||||
if ((retval > 0) && buffer_delay(bh))
|
||||
ext4_da_release_space(inode, retval, 0);
|
||||
ext4_da_update_reserve_space(inode, retval);
|
||||
}
|
||||
|
||||
up_write((&EXT4_I(inode)->i_data_sem));
|
||||
|
@ -1336,12 +1424,8 @@ static int ext4_ordered_write_end(struct file *file,
|
|||
{
|
||||
handle_t *handle = ext4_journal_current_handle();
|
||||
struct inode *inode = mapping->host;
|
||||
unsigned from, to;
|
||||
int ret = 0, ret2;
|
||||
|
||||
from = pos & (PAGE_CACHE_SIZE - 1);
|
||||
to = from + len;
|
||||
|
||||
ret = ext4_jbd2_file_inode(handle, inode);
|
||||
|
||||
if (ret == 0) {
|
||||
|
@ -1437,36 +1521,6 @@ static int ext4_journalled_write_end(struct file *file,
|
|||
|
||||
return ret ? ret : copied;
|
||||
}
|
||||
/*
|
||||
* Calculate the number of metadata blocks need to reserve
|
||||
* to allocate @blocks for non extent file based file
|
||||
*/
|
||||
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
|
||||
{
|
||||
int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
|
||||
int ind_blks, dind_blks, tind_blks;
|
||||
|
||||
/* number of new indirect blocks needed */
|
||||
ind_blks = (blocks + icap - 1) / icap;
|
||||
|
||||
dind_blks = (ind_blks + icap - 1) / icap;
|
||||
|
||||
tind_blks = 1;
|
||||
|
||||
return ind_blks + dind_blks + tind_blks;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the number of metadata blocks need to reserve
|
||||
* to allocate given number of blocks
|
||||
*/
|
||||
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
|
||||
{
|
||||
if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
|
||||
return ext4_ext_calc_metadata_amount(inode, blocks);
|
||||
|
||||
return ext4_indirect_calc_metadata_amount(inode, blocks);
|
||||
}
|
||||
|
||||
static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
|
||||
{
|
||||
|
@ -1490,7 +1544,6 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
|
|||
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
/* reduce fs free blocks counter */
|
||||
percpu_counter_sub(&sbi->s_freeblocks_counter, total);
|
||||
|
||||
|
@ -1501,35 +1554,31 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
|
|||
return 0; /* success */
|
||||
}
|
||||
|
||||
void ext4_da_release_space(struct inode *inode, int used, int to_free)
|
||||
static void ext4_da_release_space(struct inode *inode, int to_free)
|
||||
{
|
||||
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
|
||||
int total, mdb, mdb_free, release;
|
||||
|
||||
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
|
||||
/* recalculate the number of metablocks still need to be reserved */
|
||||
total = EXT4_I(inode)->i_reserved_data_blocks - used - to_free;
|
||||
total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
|
||||
mdb = ext4_calc_metadata_amount(inode, total);
|
||||
|
||||
/* figure out how many metablocks to release */
|
||||
BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
|
||||
mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
|
||||
|
||||
/* Account for allocated meta_blocks */
|
||||
mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
|
||||
|
||||
release = to_free + mdb_free;
|
||||
|
||||
/* update fs free blocks counter for truncate case */
|
||||
percpu_counter_add(&sbi->s_freeblocks_counter, release);
|
||||
|
||||
/* update per-inode reservations */
|
||||
BUG_ON(used + to_free > EXT4_I(inode)->i_reserved_data_blocks);
|
||||
EXT4_I(inode)->i_reserved_data_blocks -= (used + to_free);
|
||||
BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
|
||||
EXT4_I(inode)->i_reserved_data_blocks -= to_free;
|
||||
|
||||
BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
|
||||
EXT4_I(inode)->i_reserved_meta_blocks = mdb;
|
||||
EXT4_I(inode)->i_allocated_meta_blocks = 0;
|
||||
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
|
||||
}
|
||||
|
||||
|
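With the `used` argument gone, ext4_da_release_space() purely returns unused reservations: recompute the metadata still needed for the remaining data reservation, release the surplus (minus metadata already allocated), and credit data plus metadata back to the free-blocks counter. A self-contained model of the bookkeeping; the metadata estimator here is a made-up stand-in for ext4_calc_metadata_amount():

    #include <stdio.h>

    struct rsv {
        int reserved_data;   /* i_reserved_data_blocks  */
        int reserved_meta;   /* i_reserved_meta_blocks  */
        int allocated_meta;  /* i_allocated_meta_blocks */
        long fs_free;        /* s_freeblocks_counter    */
    };

    /* crude stand-in: roughly one metadata block per 1024 data blocks */
    static int calc_metadata_amount(int blocks)
    {
        return blocks ? (blocks + 1023) / 1024 + 1 : 0;
    }

    static void da_release_space(struct rsv *r, int to_free)
    {
        int total = r->reserved_data - to_free;     /* data still reserved */
        int mdb = calc_metadata_amount(total);      /* metadata still needed */
        int mdb_free = r->reserved_meta - mdb - r->allocated_meta;
        int release = to_free + mdb_free;

        r->fs_free += release;         /* give blocks back to the fs */
        r->reserved_data -= to_free;   /* shrink per-inode data reservation */
        r->reserved_meta = mdb;        /* and the matching metadata */
        r->allocated_meta = 0;
    }

    int main(void)
    {
        struct rsv r = { 2048, 3, 0, 0 };
        da_release_space(&r, 1024);
        printf("data=%d meta=%d freed=%ld\n",
               r.reserved_data, r.reserved_meta, r.fs_free);  /* 1024 2 1025 */
        return 0;
    }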
@@ -1551,7 +1600,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 		}
 		curr_off = next_off;
 	} while ((bh = bh->b_this_page) != head);
-	ext4_da_release_space(page->mapping->host, 0, to_release);
+	ext4_da_release_space(page->mapping->host, to_release);
 }
 
 /*
@@ -2280,8 +2329,11 @@ retry:
 	}
 
 	page = __grab_cache_page(mapping, index);
-	if (!page)
-		return -ENOMEM;
+	if (!page) {
+		ext4_journal_stop(handle);
+		ret = -ENOMEM;
+		goto out;
+	}
 	*pagep = page;
 
 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
@@ -3590,6 +3642,16 @@ static int __ext4_get_inode_loc(struct inode *inode,
 		}
 		if (!buffer_uptodate(bh)) {
 			lock_buffer(bh);
+
+			/*
+			 * If the buffer has the write error flag, we have failed
+			 * to write out another inode in the same block. In this
+			 * case, we don't have to read the block because we may
+			 * read the old inode data successfully.
+			 */
+			if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
+				set_buffer_uptodate(bh);
+
 			if (buffer_uptodate(bh)) {
 				/* someone brought it uptodate while we waited */
 				unlock_buffer(bh);
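The comment added in the last hunk is worth restating: a failed writeback leaves the write-error flag set while the in-core buffer still holds the last good copy of the inode block, so it can be treated as uptodate instead of being re-read (and possibly hitting the same bad region again). A toy model of the decision:

    #include <stdio.h>

    struct buf { int uptodate; int write_io_error; };

    /* decide whether an inode-table buffer needs a disk read */
    static int needs_read(struct buf *bh)
    {
        /* a failed writeback clears uptodate, but the in-core data is
         * still the last good copy; trust it instead of re-reading */
        if (bh->write_io_error && !bh->uptodate)
            bh->uptodate = 1;
        return !bh->uptodate;
    }

    int main(void)
    {
        struct buf clean = { 1, 0 }, failed = { 0, 1 }, cold = { 0, 0 };
        printf("%d %d %d\n", needs_read(&clean), needs_read(&failed),
               needs_read(&cold));   /* 0 0 1 */
        return 0;
    }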
fs/ext4/mballoc.c
@@ -787,13 +787,16 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
 		if (bh_uptodate_or_lock(bh[i]))
 			continue;
 
+		spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 			ext4_init_block_bitmap(sb, bh[i],
 						first_group + i, desc);
 			set_buffer_uptodate(bh[i]);
 			unlock_buffer(bh[i]);
+			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
 			continue;
 		}
+		spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
 		get_bh(bh[i]);
 		bh[i]->b_end_io = end_buffer_read_sync;
 		submit_bh(READ, bh[i]);
@@ -2477,7 +2480,7 @@ err_freesgi:
 int ext4_mb_init(struct super_block *sb, int needs_recovery)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	unsigned i;
+	unsigned i, j;
 	unsigned offset;
 	unsigned max;
 	int ret;
@@ -2537,7 +2540,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
 	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
 
-	i = sizeof(struct ext4_locality_group) * NR_CPUS;
+	i = sizeof(struct ext4_locality_group) * nr_cpu_ids;
 	sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
 	if (sbi->s_locality_groups == NULL) {
 		clear_opt(sbi->s_mount_opt, MBALLOC);
@@ -2545,11 +2548,12 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 		kfree(sbi->s_mb_maxs);
 		return -ENOMEM;
 	}
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		struct ext4_locality_group *lg;
 		lg = &sbi->s_locality_groups[i];
 		mutex_init(&lg->lg_mutex);
-		INIT_LIST_HEAD(&lg->lg_prealloc_list);
+		for (j = 0; j < PREALLOC_TB_SIZE; j++)
+			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
 		spin_lock_init(&lg->lg_prealloc_lock);
 	}
 
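On the NR_CPUS conversion above: nr_cpu_ids is the highest possible CPU id plus one on the running system, usually far below the compile-time NR_CPUS ceiling, so sizing and walking the locality-group array by it avoids allocating and initializing entries no CPU can ever use. Illustrative numbers only:

    #include <stdio.h>

    #define NR_CPUS 4096            /* compile-time maximum (example config) */

    struct locality_group { char pad[192]; };   /* illustrative size */

    int main(void)
    {
        int nr_cpu_ids = 8;         /* what the booted machine actually has */

        printf("NR_CPUS sizing:    %zu bytes\n",
               sizeof(struct locality_group) * NR_CPUS);
        printf("nr_cpu_ids sizing: %zu bytes\n",
               sizeof(struct locality_group) * (size_t)nr_cpu_ids);
        return 0;
    }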
@@ -3260,6 +3264,7 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
 				struct ext4_prealloc_space *pa)
 {
 	unsigned int len = ac->ac_o_ex.fe_len;
+
 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
 					&ac->ac_b_ex.fe_group,
 					&ac->ac_b_ex.fe_start);
@@ -3282,6 +3287,7 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
 static noinline_for_stack int
 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 {
+	int order, i;
 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
 	struct ext4_locality_group *lg;
 	struct ext4_prealloc_space *pa;
@@ -3322,11 +3328,18 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 	lg = ac->ac_lg;
 	if (lg == NULL)
 		return 0;
+	order = fls(ac->ac_o_ex.fe_len) - 1;
+	if (order > PREALLOC_TB_SIZE - 1)
+		/* The max size of hash table is PREALLOC_TB_SIZE */
+		order = PREALLOC_TB_SIZE - 1;
 
+	for (i = order; i < PREALLOC_TB_SIZE; i++) {
 		rcu_read_lock();
-	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list, pa_inode_list) {
+		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
+						pa_inode_list) {
 			spin_lock(&pa->pa_lock);
-		if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
+			if (pa->pa_deleted == 0 &&
+					pa->pa_free >= ac->ac_o_ex.fe_len) {
 				atomic_inc(&pa->pa_count);
 				ext4_mb_use_group_pa(ac, pa);
 				spin_unlock(&pa->pa_lock);
@@ -3337,7 +3350,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 			spin_unlock(&pa->pa_lock);
 		}
 		rcu_read_unlock();
-
+	}
 	return 0;
 }
 
@@ -3560,6 +3573,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
 	pa->pa_free = pa->pa_len;
 	atomic_set(&pa->pa_count, 1);
 	spin_lock_init(&pa->pa_lock);
+	INIT_LIST_HEAD(&pa->pa_inode_list);
 	pa->pa_deleted = 0;
 	pa->pa_linear = 1;
@@ -3580,10 +3594,10 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
 
-	spin_lock(pa->pa_obj_lock);
-	list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list);
-	spin_unlock(pa->pa_obj_lock);
-
+	/*
+	 * We will later add the new pa to the right bucket
+	 * after updating the pa_free in ext4_mb_release_context
+	 */
 	return 0;
 }
 
@@ -3733,20 +3747,23 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 
 	bitmap_bh = ext4_read_block_bitmap(sb, group);
 	if (bitmap_bh == NULL) {
-		/* error handling here */
-		ext4_mb_release_desc(&e4b);
-		BUG_ON(bitmap_bh == NULL);
+		ext4_error(sb, __func__, "Error in reading block "
+				"bitmap for %lu\n", group);
+		return 0;
 	}
 
 	err = ext4_mb_load_buddy(sb, group, &e4b);
-	BUG_ON(err != 0); /* error handling here */
+	if (err) {
+		ext4_error(sb, __func__, "Error in loading buddy "
+				"information for %lu\n", group);
+		put_bh(bitmap_bh);
+		return 0;
+	}
 
 	if (needed == 0)
 		needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
 
 	grp = ext4_get_group_info(sb, group);
 	INIT_LIST_HEAD(&list);
 
 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
 repeat:
 	ext4_lock_group(sb, group);
@@ -3903,13 +3920,18 @@ repeat:
 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
 
 		err = ext4_mb_load_buddy(sb, group, &e4b);
-		BUG_ON(err != 0); /* error handling here */
+		if (err) {
+			ext4_error(sb, __func__, "Error in loading buddy "
+					"information for %lu\n", group);
+			continue;
+		}
 
 		bitmap_bh = ext4_read_block_bitmap(sb, group);
 		if (bitmap_bh == NULL) {
-			/* error handling here */
+			ext4_error(sb, __func__, "Error in reading block "
+					"bitmap for %lu\n", group);
 			ext4_mb_release_desc(&e4b);
-			BUG_ON(bitmap_bh == NULL);
+			continue;
 		}
 
 		ext4_lock_group(sb, group);
@@ -4112,22 +4134,168 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
 
 }
 
+static noinline_for_stack void
+ext4_mb_discard_lg_preallocations(struct super_block *sb,
+					struct ext4_locality_group *lg,
+					int order, int total_entries)
+{
+	ext4_group_t group = 0;
+	struct ext4_buddy e4b;
+	struct list_head discard_list;
+	struct ext4_prealloc_space *pa, *tmp;
+	struct ext4_allocation_context *ac;
+
+	mb_debug("discard locality group preallocation\n");
+
+	INIT_LIST_HEAD(&discard_list);
+	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
+
+	spin_lock(&lg->lg_prealloc_lock);
+	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
+						pa_inode_list) {
+		spin_lock(&pa->pa_lock);
+		if (atomic_read(&pa->pa_count)) {
+			/*
+			 * This is the pa that we just used
+			 * for block allocation. So don't
+			 * free that
+			 */
+			spin_unlock(&pa->pa_lock);
+			continue;
+		}
+		if (pa->pa_deleted) {
+			spin_unlock(&pa->pa_lock);
+			continue;
+		}
+		/* only lg prealloc space */
+		BUG_ON(!pa->pa_linear);
+
+		/* seems this one can be freed ... */
+		pa->pa_deleted = 1;
+		spin_unlock(&pa->pa_lock);
+
+		list_del_rcu(&pa->pa_inode_list);
+		list_add(&pa->u.pa_tmp_list, &discard_list);
+
+		total_entries--;
+		if (total_entries <= 5) {
+			/*
+			 * we want to keep only 5 entries
+			 * allowing it to grow to 8. This
+			 * mak sure we don't call discard
+			 * soon for this list.
+			 */
+			break;
+		}
+	}
+	spin_unlock(&lg->lg_prealloc_lock);
+
+	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+
+		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
+		if (ext4_mb_load_buddy(sb, group, &e4b)) {
+			ext4_error(sb, __func__, "Error in loading buddy "
+					"information for %lu\n", group);
+			continue;
+		}
+		ext4_lock_group(sb, group);
+		list_del(&pa->pa_group_list);
+		ext4_mb_release_group_pa(&e4b, pa, ac);
+		ext4_unlock_group(sb, group);
+
+		ext4_mb_release_desc(&e4b);
+		list_del(&pa->u.pa_tmp_list);
+		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
+	}
+	if (ac)
+		kmem_cache_free(ext4_ac_cachep, ac);
+}
+
+/*
+ * We have incremented pa_count. So it cannot be freed at this
+ * point. Also we hold lg_mutex. So no parallel allocation is
+ * possible from this lg. That means pa_free cannot be updated.
+ *
+ * A parallel ext4_mb_discard_group_preallocations is possible.
+ * which can cause the lg_prealloc_list to be updated.
+ */
+
+static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
+{
+	int order, added = 0, lg_prealloc_count = 1;
+	struct super_block *sb = ac->ac_sb;
+	struct ext4_locality_group *lg = ac->ac_lg;
+	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
+
+	order = fls(pa->pa_free) - 1;
+	if (order > PREALLOC_TB_SIZE - 1)
+		/* The max size of hash table is PREALLOC_TB_SIZE */
+		order = PREALLOC_TB_SIZE - 1;
+	/* Add the prealloc space to lg */
+	rcu_read_lock();
+	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
+						pa_inode_list) {
+		spin_lock(&tmp_pa->pa_lock);
+		if (tmp_pa->pa_deleted) {
+			spin_unlock(&pa->pa_lock);
+			continue;
+		}
+		if (!added && pa->pa_free < tmp_pa->pa_free) {
+			/* Add to the tail of the previous entry */
+			list_add_tail_rcu(&pa->pa_inode_list,
+						&tmp_pa->pa_inode_list);
+			added = 1;
+			/*
+			 * we want to count the total
+			 * number of entries in the list
+			 */
+		}
+		spin_unlock(&tmp_pa->pa_lock);
+		lg_prealloc_count++;
+	}
+	if (!added)
+		list_add_tail_rcu(&pa->pa_inode_list,
+					&lg->lg_prealloc_list[order]);
+	rcu_read_unlock();
+
+	/* Now trim the list to be not more than 8 elements */
+	if (lg_prealloc_count > 8) {
+		ext4_mb_discard_lg_preallocations(sb, lg,
+						order, lg_prealloc_count);
+		return;
+	}
+	return ;
+}
+
 /*
  * release all resource we used in allocation
  */
 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
 {
-	if (ac->ac_pa) {
-		if (ac->ac_pa->pa_linear) {
+	struct ext4_prealloc_space *pa = ac->ac_pa;
+	if (pa) {
+		if (pa->pa_linear) {
 			/* see comment in ext4_mb_use_group_pa() */
-			spin_lock(&ac->ac_pa->pa_lock);
-			ac->ac_pa->pa_pstart += ac->ac_b_ex.fe_len;
-			ac->ac_pa->pa_lstart += ac->ac_b_ex.fe_len;
-			ac->ac_pa->pa_free -= ac->ac_b_ex.fe_len;
-			ac->ac_pa->pa_len -= ac->ac_b_ex.fe_len;
-			spin_unlock(&ac->ac_pa->pa_lock);
+			spin_lock(&pa->pa_lock);
+			pa->pa_pstart += ac->ac_b_ex.fe_len;
+			pa->pa_lstart += ac->ac_b_ex.fe_len;
+			pa->pa_free -= ac->ac_b_ex.fe_len;
+			pa->pa_len -= ac->ac_b_ex.fe_len;
+			spin_unlock(&pa->pa_lock);
+			/*
+			 * We want to add the pa to the right bucket.
+			 * Remove it from the list and while adding
+			 * make sure the list to which we are adding
+			 * doesn't grow big.
+			 */
+			if (likely(pa->pa_free)) {
+				spin_lock(pa->pa_obj_lock);
+				list_del_rcu(&pa->pa_inode_list);
+				spin_unlock(pa->pa_obj_lock);
+				ext4_mb_add_n_trim(ac);
+			}
 		}
-		ext4_mb_put_pa(ac, ac->ac_sb, ac->ac_pa);
+		ext4_mb_put_pa(ac, ac->ac_sb, pa);
 	}
 	if (ac->ac_bitmap_page)
 		page_cache_release(ac->ac_bitmap_page);
@@ -4420,11 +4588,15 @@ do_more:
 		count -= overflow;
 	}
 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
-	if (!bitmap_bh)
+	if (!bitmap_bh) {
+		err = -EIO;
 		goto error_return;
+	}
 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
-	if (!gdp)
+	if (!gdp) {
+		err = -EIO;
 		goto error_return;
+	}
 
 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
fs/ext4/mballoc.h
@@ -164,11 +164,17 @@ struct ext4_free_extent {
  * Locality group:
  * we try to group all related changes together
  * so that writeback can flush/allocate them together as well
+ * Size of lg_prealloc_list hash is determined by MB_DEFAULT_GROUP_PREALLOC
+ * (512). We store prealloc space into the hash based on the pa_free blocks
+ * order value.ie, fls(pa_free)-1;
  */
+#define PREALLOC_TB_SIZE 10
 struct ext4_locality_group {
 	/* for allocator */
-	struct mutex		lg_mutex;	/* to serialize allocates */
-	struct list_head	lg_prealloc_list;/* list of preallocations */
+	/* to serialize allocates */
+	struct mutex		lg_mutex;
+	/* list of preallocations */
+	struct list_head	lg_prealloc_list[PREALLOC_TB_SIZE];
 	spinlock_t		lg_prealloc_lock;
 };
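As the new comment says, preallocations are hashed by size order: bucket index fls(pa_free) - 1, capped at PREALLOC_TB_SIZE - 1, which is what ext4_mb_use_preallocated() and ext4_mb_add_n_trim() both compute. A runnable check of the bucketing, with fls reimplemented portably:

    #include <stdio.h>

    #define PREALLOC_TB_SIZE 10

    static int fls_(unsigned int x)   /* index of most significant set bit + 1 */
    {
        int r = 0;
        while (x) { x >>= 1; r++; }
        return r;
    }

    static int pa_bucket(unsigned int pa_free)
    {
        int order = fls_(pa_free) - 1;
        if (order > PREALLOC_TB_SIZE - 1)
            order = PREALLOC_TB_SIZE - 1;   /* hash table is capped */
        return order;
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               pa_bucket(1),       /* 0 */
               pa_bucket(8),       /* 3 */
               pa_bucket(512),     /* 9 */
               pa_bucket(100000)); /* capped at 9 */
        return 0;
    }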
fs/ext4/resize.c
@@ -507,14 +507,14 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	return 0;
 
 exit_inode:
-	//ext4_journal_release_buffer(handle, iloc.bh);
+	/* ext4_journal_release_buffer(handle, iloc.bh); */
 	brelse(iloc.bh);
 exit_dindj:
-	//ext4_journal_release_buffer(handle, dind);
+	/* ext4_journal_release_buffer(handle, dind); */
 exit_primary:
-	//ext4_journal_release_buffer(handle, *primary);
+	/* ext4_journal_release_buffer(handle, *primary); */
 exit_sbh:
-	//ext4_journal_release_buffer(handle, *primary);
+	/* ext4_journal_release_buffer(handle, *primary); */
 exit_dind:
 	brelse(dind);
 exit_bh:
@@ -946,7 +946,8 @@ exit_put:
 	return err;
 } /* ext4_group_add */
 
-/* Extend the filesystem to the new number of blocks specified. This entry
+/*
+ * Extend the filesystem to the new number of blocks specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
fs/ext4/super.c
@@ -50,8 +50,7 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
 static int ext4_create_journal(struct super_block *, struct ext4_super_block *,
 			       unsigned int);
 static void ext4_commit_super(struct super_block *sb,
-			      struct ext4_super_block * es,
-			      int sync);
+			      struct ext4_super_block *es, int sync);
 static void ext4_mark_recovery_complete(struct super_block *sb,
 					struct ext4_super_block *es);
 static void ext4_clear_journal_err(struct super_block *sb,
@@ -278,8 +277,7 @@ static const char *ext4_decode_error(struct super_block * sb, int errno,
 /* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */
 
-void __ext4_std_error (struct super_block * sb, const char * function,
-			int errno)
+void __ext4_std_error(struct super_block *sb, const char *function, int errno)
 {
 	char nbuf[16];
 	const char *errstr;
@@ -647,7 +645,8 @@ static void ext4_clear_inode(struct inode *inode)
 				       &EXT4_I(inode)->jinode);
 }
 
-static inline void ext4_show_quota_options(struct seq_file *seq, struct super_block *sb)
+static inline void ext4_show_quota_options(struct seq_file *seq,
+					   struct super_block *sb)
 {
 #if defined(CONFIG_QUOTA)
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -1506,14 +1505,13 @@ static int ext4_fill_flex_info(struct super_block *sb)
 
 	flex_group_count = (sbi->s_groups_count + groups_per_flex - 1) /
 		groups_per_flex;
-	sbi->s_flex_groups = kmalloc(flex_group_count *
+	sbi->s_flex_groups = kzalloc(flex_group_count *
 				     sizeof(struct flex_groups), GFP_KERNEL);
 	if (sbi->s_flex_groups == NULL) {
-		printk(KERN_ERR "EXT4-fs: not enough memory\n");
+		printk(KERN_ERR "EXT4-fs: not enough memory for "
+				"%lu flex groups\n", flex_group_count);
 		goto failed;
 	}
-	memset(sbi->s_flex_groups, 0, flex_group_count *
-	       sizeof(struct flex_groups));
 
 	gdp = ext4_get_group_desc(sb, 1, &bh);
 	block_bitmap = ext4_block_bitmap(sb, gdp) - 1;
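kzalloc(n, flags) is kmalloc(n, flags) plus a memset() to zero, which is why the separate memset disappears above; the error message also now reports how many flex groups were being allocated. The user-space analogue is calloc versus malloc + memset (the struct layout below is illustrative, not ext4's real flex_groups):

    #include <stdlib.h>
    #include <string.h>

    struct flex_groups { unsigned long free_blocks, free_inodes; };

    int main(void)
    {
        size_t flex_group_count = 128;

        /* before: allocate, then zero by hand */
        struct flex_groups *a = malloc(flex_group_count * sizeof(*a));
        if (a)
            memset(a, 0, flex_group_count * sizeof(*a));

        /* after: one call that returns zeroed memory (kzalloc analogue) */
        struct flex_groups *b = calloc(flex_group_count, sizeof(*b));

        free(a);
        free(b);
        return 0;
    }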
@@ -1597,16 +1595,14 @@ static int ext4_check_descriptors(struct super_block *sb)
 			(EXT4_BLOCKS_PER_GROUP(sb) - 1);
 
 		block_bitmap = ext4_block_bitmap(sb, gdp);
-		if (block_bitmap < first_block || block_bitmap > last_block)
-		{
+		if (block_bitmap < first_block || block_bitmap > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
 			       "Block bitmap for group %lu not in group "
 			       "(block %llu)!", i, block_bitmap);
 			return 0;
 		}
 		inode_bitmap = ext4_inode_bitmap(sb, gdp);
-		if (inode_bitmap < first_block || inode_bitmap > last_block)
-		{
+		if (inode_bitmap < first_block || inode_bitmap > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
 			       "Inode bitmap for group %lu not in group "
 			       "(block %llu)!", i, inode_bitmap);
@@ -1614,20 +1610,22 @@ static int ext4_check_descriptors(struct super_block *sb)
 		}
 		inode_table = ext4_inode_table(sb, gdp);
 		if (inode_table < first_block ||
-		    inode_table + sbi->s_itb_per_group - 1 > last_block)
-		{
+		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
 			       "Inode table for group %lu not in group "
 			       "(block %llu)!", i, inode_table);
 			return 0;
 		}
+		spin_lock(sb_bgl_lock(sbi, i));
 		if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
 			       "Checksum for group %lu failed (%u!=%u)\n",
 			       i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
 			       gdp)), le16_to_cpu(gdp->bg_checksum));
-			return 0;
+			if (!(sb->s_flags & MS_RDONLY))
+				return 0;
 		}
+		spin_unlock(sb_bgl_lock(sbi, i));
 		if (!flexbg_flag)
 			first_block += EXT4_BLOCKS_PER_GROUP(sb);
 	}
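Together with the remount loop later in this file, the change above implements 'Allow read/only mounts with corrupted block group checksums': a descriptor checksum mismatch (a crc16 over the fs UUID, group number, and descriptor contents) now only rejects read-write mounts, so a damaged filesystem can still be mounted read-only for recovery. The gating, reduced to a sketch with a stubbed verifier:

    #include <stdio.h>

    #define MS_RDONLY 1

    /* stub for ext4_group_desc_csum_verify(); the group chosen as "bad"
     * here is arbitrary, purely to exercise both paths */
    static int csum_verify(int group) { return group != 7; }

    static int check_descriptors(unsigned long sb_flags, int ngroups)
    {
        for (int g = 0; g < ngroups; g++) {
            if (!csum_verify(g)) {
                fprintf(stderr, "Checksum for group %d failed\n", g);
                if (!(sb_flags & MS_RDONLY))
                    return 0;           /* refuse a read-write mount */
            }
        }
        return 1;                       /* descriptors acceptable */
    }

    int main(void)
    {
        printf("rw mount ok: %d\n", check_descriptors(0, 16));         /* 0 */
        printf("ro mount ok: %d\n", check_descriptors(MS_RDONLY, 16)); /* 1 */
        return 0;
    }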
@@ -2794,8 +2792,7 @@ static int ext4_create_journal(struct super_block * sb,
 }
 
 static void ext4_commit_super(struct super_block *sb,
-			      struct ext4_super_block * es,
-			      int sync)
+			      struct ext4_super_block *es, int sync)
 {
 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
 
@@ -2961,6 +2958,7 @@ static int ext4_remount (struct super_block * sb, int * flags, char * data)
 	ext4_fsblk_t n_blocks_count = 0;
 	unsigned long old_sb_flags;
 	struct ext4_mount_options old_opts;
+	ext4_group_t g;
 	int err;
 #ifdef CONFIG_QUOTA
 	int i;
@@ -3038,6 +3036,26 @@ static int ext4_remount (struct super_block * sb, int * flags, char * data)
 				goto restore_opts;
 			}
 
+			/*
+			 * Make sure the group descriptor checksums
+			 * are sane. If they aren't, refuse to
+			 * remount r/w.
+			 */
+			for (g = 0; g < sbi->s_groups_count; g++) {
+				struct ext4_group_desc *gdp =
+					ext4_get_group_desc(sb, g, NULL);
+
+				if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
+					printk(KERN_ERR
+					       "EXT4-fs: ext4_remount: "
+					       "Checksum for group %lu failed (%u!=%u)\n",
+					       g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
+					       le16_to_cpu(gdp->bg_checksum));
+					err = -EINVAL;
+					goto restore_opts;
+				}
+			}
+
 			/*
 			 * If we have an unprocessed orphan list hanging
 			 * around from a previously readonly bdev mount,
@@ -3331,7 +3349,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
 	}
 	/* Journaling quota? */
 	if (EXT4_SB(sb)->s_qf_names[type]) {
-		/* Quotafile not of fs root? */
+		/* Quotafile not in fs root? */
 		if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
 			printk(KERN_WARNING
 				"EXT4-fs: Quota file not on filesystem root. "
fs/jbd2/commit.c
@@ -262,8 +262,18 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
 		jinode->i_flags |= JI_COMMIT_RUNNING;
 		spin_unlock(&journal->j_list_lock);
 		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
-		if (!ret)
-			ret = err;
+		if (err) {
+			/*
+			 * Because AS_EIO is cleared by
+			 * wait_on_page_writeback_range(), set it again so
+			 * that user process can get -EIO from fsync().
+			 */
+			set_bit(AS_EIO,
+				&jinode->i_vfs_inode->i_mapping->flags);
+
+			if (!ret)
+				ret = err;
+		}
 		spin_lock(&journal->j_list_lock);
 		jinode->i_flags &= ~JI_COMMIT_RUNNING;
 		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
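The set_bit() above exists because waiting on writeback consumes the mapping's error bit: once the commit path has observed the failure, a later fsync() would otherwise return success. A toy model of the consume-and-restore behavior:

    #include <stdio.h>

    static int as_eio;      /* models the AS_EIO bit on the address_space */

    /* models filemap_fdatawait(): returns and clears any pending error */
    static int fdatawait(void)
    {
        int err = as_eio ? -5 : 0;   /* -EIO */
        as_eio = 0;                  /* waiting consumes the error bit */
        return err;
    }

    int main(void)
    {
        as_eio = 1;                  /* a page failed to write back */

        int err = fdatawait();       /* commit path notices the failure */
        if (err)
            as_eio = 1;              /* re-set it so fsync() still sees -EIO */

        printf("fsync result: %d\n", fdatawait());   /* -5 */
        return 0;
    }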
@@ -670,8 +680,14 @@ start_journal_io:
 	 * commit block, which happens below in such setting.
 	 */
 	err = journal_finish_inode_data_buffers(journal, commit_transaction);
-	if (err)
-		jbd2_journal_abort(journal, err);
+	if (err) {
+		char b[BDEVNAME_SIZE];
+
+		printk(KERN_WARNING
+			"JBD2: Detected IO errors while flushing file data "
+			"on %s\n", bdevname(journal->j_fs_dev, b));
+		err = 0;
+	}
 
 	/* Lo and behold: we have just managed to send a transaction to
 	   the log. Before we can commit it, wait for the IO so far to
fs/jbd2/journal.c
@@ -68,7 +68,6 @@ EXPORT_SYMBOL(jbd2_journal_set_features);
 EXPORT_SYMBOL(jbd2_journal_create);
 EXPORT_SYMBOL(jbd2_journal_load);
 EXPORT_SYMBOL(jbd2_journal_destroy);
-EXPORT_SYMBOL(jbd2_journal_update_superblock);
 EXPORT_SYMBOL(jbd2_journal_abort);
 EXPORT_SYMBOL(jbd2_journal_errno);
 EXPORT_SYMBOL(jbd2_journal_ack_err);