Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (60 commits)
  ext4: prevent memory leaks from ext4_mb_init_backend() on error path
  ext4: use EXT4_BAD_INO for buddy cache to avoid colliding with valid inode #
  ext4: use ext4_msg() instead of printk in mballoc
  ext4: use ext4_kvzalloc()/ext4_kvmalloc() for s_group_desc and s_group_info
  ext4: introduce ext4_kvmalloc(), ext4_kzalloc(), and ext4_kvfree()
  ext4: use the correct error exit path in ext4_init_inode_table()
  ext4: add missing kfree() on error return path in add_new_gdb()
  ext4: change umode_t in tracepoint headers to be an explicit __u16
  ext4: fix races in ext4_sync_parent()
  ext4: Fix overflow caused by missing cast in ext4_fallocate()
  ext4: add action of moving index in ext4_ext_rm_idx for Punch Hole
  ext4: simplify parameters of reserve_backup_gdb()
  ext4: simplify parameters of add_new_gdb()
  ext4: remove lock_buffer in bclean() and setup_new_group_blocks()
  ext4: simplify journal handling in setup_new_group_blocks()
  ext4: let setup_new_group_blocks() set multiple bits at a time
  ext4: fix a typo in ext4_group_extend()
  ext4: let ext4_group_add_blocks() handle 0 blocks quickly
  ext4: let ext4_group_add_blocks() return an error code
  ext4: rename ext4_add_groupblocks() to ext4_group_add_blocks()
  ...

Fix up conflict in fs/ext4/inode.c: commit aacfc19c62 ("fs: simplify the
blockdev_direct_IO prototype") had changed the ext4_ind_direct_IO()
function for the new simplified calling convention, while commit
dae1e52cb1 ("ext4: move ext4_ind_* functions from inode.c to indirect.c")
moved the function to another file.

This commit is contained in: commit 60ad446682
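For context on how that conflict resolves: after the prototype simplification,
the relocated ext4_ind_direct_IO() in fs/ext4/indirect.c has to use the
new-style helper. A rough sketch of the resolved call, assuming the
post-aacfc19c62 form of blockdev_direct_IO() (treat the exact argument list
as illustrative rather than authoritative):

	/* indirect.c: direct I/O through the indirect-block path */
	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				 ext4_get_block);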
fs/ext4/Makefile

@@ -7,7 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
 ext4-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
 		ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
 		ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
-		mmp.o
+		mmp.o indirect.o
 
 ext4-$(CONFIG_EXT4_FS_XATTR)		+= xattr.o xattr_user.o xattr_trusted.o
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)	+= acl.o
fs/ext4/balloc.c

@@ -620,3 +620,51 @@ unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
 }
+
+/**
+ * ext4_inode_to_goal_block - return a hint for block allocation
+ * @inode: inode for block allocation
+ *
+ * Return the ideal location to start allocating blocks for a
+ * newly created inode.
+ */
+ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
+{
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	ext4_group_t block_group;
+	ext4_grpblk_t colour;
+	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
+	ext4_fsblk_t bg_start;
+	ext4_fsblk_t last_block;
+
+	block_group = ei->i_block_group;
+	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
+		/*
+		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
+		 * block groups per flexgroup, reserve the first block
+		 * group for directories and special files.  Regular
+		 * files will start at the second block group.  This
+		 * tends to speed up directory access and improves
+		 * fsck times.
+		 */
+		block_group &= ~(flex_size-1);
+		if (S_ISREG(inode->i_mode))
+			block_group++;
+	}
+	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
+	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
+
+	/*
+	 * If we are doing delayed allocation, we don't need take
+	 * colour into account.
+	 */
+	if (test_opt(inode->i_sb, DELALLOC))
+		return bg_start;
+
+	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
+		colour = (current->pid % 16) *
+			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+	else
+		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
+	return bg_start + colour;
+}
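A quick worked example of the colour calculation above (mine, just
arithmetic, not from the patch): with the common 4k block size there are
32768 blocks per group, so a process with pid 4321 gets
colour = (4321 % 16) * (32768 / 16) = 1 * 2048, i.e. its allocations start
2048 blocks into the goal group. Spreading concurrent writers across 16
distinct offsets keeps them from contending for the same free-block region.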
fs/ext4/block_validity.c

@@ -246,3 +246,24 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
 	return 1;
 }
+
+int ext4_check_blockref(const char *function, unsigned int line,
+			struct inode *inode, __le32 *p, unsigned int max)
+{
+	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+	__le32 *bref = p;
+	unsigned int blk;
+
+	while (bref < p+max) {
+		blk = le32_to_cpu(*bref++);
+		if (blk &&
+		    unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+						    blk, 1))) {
+			es->s_last_error_block = cpu_to_le64(blk);
+			ext4_error_inode(inode, function, line, blk,
+					 "invalid block");
+			return -EIO;
+		}
+	}
+	return 0;
+}
fs/ext4/ext4.h

@@ -526,6 +526,7 @@ struct ext4_new_group_data {
 #define EXT4_FREE_BLOCKS_METADATA	0x0001
 #define EXT4_FREE_BLOCKS_FORGET		0x0002
 #define EXT4_FREE_BLOCKS_VALIDATED	0x0004
+#define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE	0x0008
 
 /*
  * ioctl commands

@@ -939,6 +940,8 @@ struct ext4_inode_info {
 #define ext4_find_next_zero_bit		find_next_zero_bit_le
 #define ext4_find_next_bit		find_next_bit_le
 
+extern void ext4_set_bits(void *bm, int cur, int len);
+
 /*
  * Maximal mount counts between two filesystem checks
  */

@@ -1126,7 +1129,8 @@ struct ext4_sb_info {
 	struct journal_s *s_journal;
 	struct list_head s_orphan;
 	struct mutex s_orphan_lock;
-	struct mutex s_resize_lock;
+	unsigned long s_resize_flags;		/* Flags indicating if there
+						   is a resizer */
 	unsigned long s_commit_interval;
 	u32 s_max_batch_time;
 	u32 s_min_batch_time;

@@ -1214,6 +1218,9 @@ struct ext4_sb_info {
 
 	/* Kernel thread for multiple mount protection */
 	struct task_struct *s_mmp_tsk;
+
+	/* record the last minlen when FITRIM is called. */
+	atomic_t s_last_trim_minblks;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)

@@ -1743,6 +1750,7 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb,
 					struct ext4_group_desc *desc);
 #define ext4_free_blocks_after_init(sb, group, desc)			\
 		ext4_init_block_bitmap(sb, NULL, group, desc)
+ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
 
 /* dir.c */
 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,

@@ -1793,7 +1801,7 @@ extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
 			     unsigned long count, int flags);
 extern int ext4_mb_add_groupinfo(struct super_block *sb,
 		ext4_group_t i, struct ext4_group_desc *desc);
-extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 				ext4_fsblk_t block, unsigned long count);
 extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);

@@ -1834,6 +1842,17 @@ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern void ext4_da_update_reserve_space(struct inode *inode,
 					int used, int quota_claim);
+
+/* indirect.c */
+extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+				struct ext4_map_blocks *map, int flags);
+extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
+				const struct iovec *iov, loff_t offset,
+				unsigned long nr_segs);
+extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
+extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk);
+extern void ext4_ind_truncate(struct inode *inode);
+
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
 extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);

@@ -1855,6 +1874,9 @@ extern int ext4_group_extend(struct super_block *sb,
 				ext4_fsblk_t n_blocks_count);
 
 /* super.c */
+extern void *ext4_kvmalloc(size_t size, gfp_t flags);
+extern void *ext4_kvzalloc(size_t size, gfp_t flags);
+extern void ext4_kvfree(void *ptr);
 extern void __ext4_error(struct super_block *, const char *, unsigned int,
 			 const char *, ...)
 	__attribute__ ((format (printf, 4, 5)));

@@ -2067,11 +2089,19 @@ struct ext4_group_info {
 					 * 5 free 8-block regions. */
 };
 
 #define EXT4_GROUP_INFO_NEED_INIT_BIT		0
+#define EXT4_GROUP_INFO_WAS_TRIMMED_BIT		1
 
 #define EXT4_MB_GRP_NEED_INIT(grp)	\
 	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+
+#define EXT4_MB_GRP_WAS_TRIMMED(grp)	\
+	(test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_SET_TRIMMED(grp)	\
+	(set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_CLEAR_TRIMMED(grp)	\
+	(clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
 
 #define EXT4_MAX_CONTENTION		8
 #define EXT4_CONTENTION_THRESHOLD	2

@@ -2122,6 +2152,19 @@ static inline void ext4_mark_super_dirty(struct super_block *sb)
 	sb->s_dirt =1;
 }
 
+/*
+ * Block validity checking
+ */
+#define ext4_check_indirect_blockref(inode, bh)				\
+	ext4_check_blockref(__func__, __LINE__, inode,			\
+			    (__le32 *)(bh)->b_data,			\
+			    EXT4_ADDR_PER_BLOCK((inode)->i_sb))
+
+#define ext4_ind_check_inode(inode)					\
+	ext4_check_blockref(__func__, __LINE__, inode,			\
+			    EXT4_I(inode)->i_data,			\
+			    EXT4_NDIR_BLOCKS)
+
 /*
  * Inodes and files operations
  */

@@ -2151,6 +2194,8 @@ extern void ext4_exit_system_zone(void);
 extern int ext4_data_block_valid(struct ext4_sb_info *sbi,
 				 ext4_fsblk_t start_blk,
 				 unsigned int count);
+extern int ext4_check_blockref(const char *, unsigned int,
+			       struct inode *, __le32 *, unsigned int);
 
 /* extents.c */
 extern int ext4_ext_tree_init(handle_t *handle, struct inode *);

@@ -2230,6 +2275,10 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh)
 extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
 extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
 
+#define EXT4_RESIZING	0
+extern int ext4_resize_begin(struct super_block *sb);
+extern void ext4_resize_end(struct super_block *sb);
+
 #endif	/* __KERNEL__ */
 
 #endif	/* _EXT4_H */
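The ext4_kvmalloc()/ext4_kvzalloc()/ext4_kvfree() declarations above back a
simple "try kmalloc, fall back to vmalloc" pattern for the potentially large
s_group_desc and s_group_info arrays. A minimal sketch of that pattern (the
real helpers live in fs/ext4/super.c; treat this as illustrative):

	void *ext4_kvmalloc(size_t size, gfp_t flags)
	{
		void *ret;

		ret = kmalloc(size, flags);	/* fast path: physically contiguous */
		if (!ret)
			ret = __vmalloc(size, flags, PAGE_KERNEL);	/* fall back */
		return ret;
	}

	void ext4_kvfree(void *ptr)
	{
		if (is_vmalloc_addr(ptr))	/* free with the matching allocator */
			vfree(ptr);
		else
			kfree(ptr);
	}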
fs/ext4/extents.c

@@ -114,12 +114,6 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 			      struct ext4_ext_path *path,
 			      ext4_lblk_t block)
 {
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	ext4_fsblk_t bg_start;
-	ext4_fsblk_t last_block;
-	ext4_grpblk_t colour;
-	ext4_group_t block_group;
-	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
 	int depth;
 
 	if (path) {

@@ -161,36 +155,7 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 	}
 
 	/* OK. use inode's group */
-	block_group = ei->i_block_group;
-	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
-		/*
-		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
-		 * block groups per flexgroup, reserve the first block
-		 * group for directories and special files.  Regular
-		 * files will start at the second block group.  This
-		 * tends to speed up directory access and improves
-		 * fsck times.
-		 */
-		block_group &= ~(flex_size-1);
-		if (S_ISREG(inode->i_mode))
-			block_group++;
-	}
-	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
-	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
-
-	/*
-	 * If we are doing delayed allocation, we don't need take
-	 * colour into account.
-	 */
-	if (test_opt(inode->i_sb, DELALLOC))
-		return bg_start;
-
-	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
-		colour = (current->pid % 16) *
-			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-	else
-		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
-	return bg_start + colour + block;
+	return ext4_inode_to_goal_block(inode);
 }
 
 /*

@@ -776,6 +741,16 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
 			logical, le32_to_cpu(curp->p_idx->ei_block));
 		return -EIO;
 	}
+
+	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
+			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
+		EXT4_ERROR_INODE(inode,
+				 "eh_entries %d >= eh_max %d!",
+				 le16_to_cpu(curp->p_hdr->eh_entries),
+				 le16_to_cpu(curp->p_hdr->eh_max));
+		return -EIO;
+	}
+
 	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
 	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
 		/* insert after */

@@ -805,13 +780,6 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
 	ext4_idx_store_pblock(ix, ptr);
 	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
 
-	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
-			     > le16_to_cpu(curp->p_hdr->eh_max))) {
-		EXT4_ERROR_INODE(inode,
-				 "logical %d == ei_block %d!",
-				 logical, le32_to_cpu(curp->p_idx->ei_block));
-		return -EIO;
-	}
 	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
 		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
 		return -EIO;

@@ -1446,8 +1414,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path)
  * ext4_ext_next_leaf_block:
  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
  */
-static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
-					struct ext4_ext_path *path)
+static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
 {
 	int depth;
 

@@ -1757,7 +1724,6 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 		goto merge;
 	}
 
-repeat:
 	depth = ext_depth(inode);
 	eh = path[depth].p_hdr;
 	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))

@@ -1765,9 +1731,10 @@ repeat:
 
 	/* probably next leaf has space for us? */
 	fex = EXT_LAST_EXTENT(eh);
-	next = ext4_ext_next_leaf_block(inode, path);
-	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
-	    && next != EXT_MAX_BLOCKS) {
+	next = EXT_MAX_BLOCKS;
+	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
+		next = ext4_ext_next_leaf_block(path);
+	if (next != EXT_MAX_BLOCKS) {
 		ext_debug("next leaf block - %d\n", next);
 		BUG_ON(npath != NULL);
 		npath = ext4_ext_find_extent(inode, next, NULL);

@@ -1779,7 +1746,7 @@ repeat:
 			ext_debug("next leaf isn't full(%d)\n",
 				  le16_to_cpu(eh->eh_entries));
 			path = npath;
-			goto repeat;
+			goto has_space;
 		}
 		ext_debug("next leaf has no free space(%d,%d)\n",
 			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

@@ -1839,7 +1806,7 @@ has_space:
 				ext4_ext_pblock(newext),
 				ext4_ext_is_uninitialized(newext),
 				ext4_ext_get_actual_len(newext),
-				nearex, len, nearex + 1, nearex + 2);
+				nearex, len, nearex, nearex + 1);
 		memmove(nearex + 1, nearex, len);
 		path[depth].p_ext = nearex;
 	}

@@ -2052,7 +2019,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 }
 
 /*
- * ext4_ext_in_cache()
+ * ext4_ext_check_cache()
  * Checks to see if the given block is in the cache.
  * If it is, the cached extent is stored in the given
  * cache extent pointer.  If the cached extent is a hole,

@@ -2134,8 +2101,6 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 /*
  * ext4_ext_rm_idx:
  * removes index from the index block.
- * It's used in truncate case only, thus all requests are for
- * last index in the block only.
  */
 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
 			struct ext4_ext_path *path)

@@ -2153,6 +2118,13 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
 	err = ext4_ext_get_access(handle, inode, path);
 	if (err)
 		return err;
+
+	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
+		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
+		len *= sizeof(struct ext4_extent_idx);
+		memmove(path->p_idx, path->p_idx + 1, len);
+	}
+
 	le16_add_cpu(&path->p_hdr->eh_entries, -1);
 	err = ext4_ext_dirty(handle, inode, path);
 	if (err)

@@ -2534,8 +2506,7 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
 	return 1;
 }
 
-static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
-				ext4_lblk_t end)
+static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
 {
 	struct super_block *sb = inode->i_sb;
 	int depth = ext_depth(inode);

@@ -2575,7 +2546,7 @@ again:
 		if (i == depth) {
 			/* this is leaf block */
 			err = ext4_ext_rm_leaf(handle, inode, path,
-					start, end);
+					start, EXT_MAX_BLOCKS - 1);
 			/* root level has p_bh == NULL, brelse() eats this */
 			brelse(path[i].p_bh);
 			path[i].p_bh = NULL;

@@ -3107,12 +3078,10 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
 					struct ext4_ext_path *path)
 {
 	struct ext4_extent *ex;
-	struct ext4_extent_header *eh;
 	int depth;
 	int err = 0;
 
 	depth = ext_depth(inode);
-	eh = path[depth].p_hdr;
 	ex = path[depth].p_ext;
 
 	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"

@@ -3357,8 +3326,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
 
 	/* check in cache */
-	if (ext4_ext_in_cache(inode, map->m_lblk, &newex) &&
-		((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) {
+	if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) &&
+	    ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
 		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 				/*

@@ -3497,8 +3466,27 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 
 			ext4_ext_mark_uninitialized(ex);
 
-			err = ext4_ext_remove_space(inode, map->m_lblk,
-				map->m_lblk + punched_out);
+			ext4_ext_invalidate_cache(inode);
+
+			err = ext4_ext_rm_leaf(handle, inode, path,
+				map->m_lblk, map->m_lblk + punched_out);
+
+			if (!err && path->p_hdr->eh_entries == 0) {
+				/*
+				 * Punch hole freed all of this sub tree,
+				 * so we need to correct eh_depth
+				 */
+				err = ext4_ext_get_access(handle, inode, path);
+				if (err == 0) {
+					ext_inode_hdr(inode)->eh_depth = 0;
+					ext_inode_hdr(inode)->eh_max =
+					cpu_to_le16(ext4_ext_space_root(
+						inode, 0));
+
+					err = ext4_ext_dirty(
+						handle, inode, path);
+				}
+			}
 
 			goto out2;
 		}

@@ -3596,17 +3584,18 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	}
 
 	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
-	if (err)
-		goto out2;
-
-	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+	if (!err)
+		err = ext4_ext_insert_extent(handle, inode, path,
+					     &newex, flags);
 	if (err) {
+		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
+			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
 		/* free data blocks we just allocated */
 		/* not a good idea to call discard here directly,
 		 * but otherwise we'd need to call it every free() */
 		ext4_discard_preallocations(inode);
 		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
-				 ext4_ext_get_actual_len(&newex), 0);
+				 ext4_ext_get_actual_len(&newex), fb_flags);
 		goto out2;
 	}
 

@@ -3699,7 +3688,7 @@ void ext4_ext_truncate(struct inode *inode)
 
 	last_block = (inode->i_size + sb->s_blocksize - 1)
 			>> EXT4_BLOCK_SIZE_BITS(sb);
-	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+	err = ext4_ext_remove_space(inode, last_block);
 
 	/* In a multi-transaction truncate, we only make the final
 	 * transaction synchronous.

@@ -3835,7 +3824,7 @@ retry:
 					blkbits) >> blkbits))
 			new_size = offset + len;
 		else
-			new_size = (map.m_lblk + ret) << blkbits;
+			new_size = ((loff_t) map.m_lblk + ret) << blkbits;
 
 		ext4_falloc_update_inode(inode, mode, new_size,
 					 (map.m_flags & EXT4_MAP_NEW));
fs/ext4/fsync.c

@@ -129,15 +129,30 @@ static int ext4_sync_parent(struct inode *inode)
 {
 	struct writeback_control wbc;
 	struct dentry *dentry = NULL;
+	struct inode *next;
 	int ret = 0;
 
-	while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
+	if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
+		return 0;
+	inode = igrab(inode);
+	while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
 		ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
-		dentry = list_entry(inode->i_dentry.next,
-				    struct dentry, d_alias);
-		if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
+		dentry = NULL;
+		spin_lock(&inode->i_lock);
+		if (!list_empty(&inode->i_dentry)) {
+			dentry = list_first_entry(&inode->i_dentry,
+						  struct dentry, d_alias);
+			dget(dentry);
+		}
+		spin_unlock(&inode->i_lock);
+		if (!dentry)
 			break;
-		inode = dentry->d_parent->d_inode;
+		next = igrab(dentry->d_parent->d_inode);
+		dput(dentry);
+		if (!next)
+			break;
+		iput(inode);
+		inode = next;
 		ret = sync_mapping_buffers(inode->i_mapping);
 		if (ret)
 			break;

@@ -148,6 +163,7 @@ static int ext4_sync_parent(struct inode *inode)
 		if (ret)
 			break;
 	}
+	iput(inode);
 	return ret;
 }
 
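The reworked loop above follows the usual VFS discipline for walking up a
directory tree without a lock held across iterations: pin each object before
dropping the lock that made it visible. A condensed view of the pattern (my
summary, not code from the patch):

	inode = igrab(inode);			/* pin the starting inode */
	while (more_parents_to_sync) {
		spin_lock(&inode->i_lock);
		dentry = list_first_entry(...);	/* alias list is stable here */
		dget(dentry);			/* pin before unlocking */
		spin_unlock(&inode->i_lock);
		next = igrab(dentry->d_parent->d_inode);
		dput(dentry);
		iput(inode);			/* drop old pin, keep walking */
		inode = next;
	}
	iput(inode);				/* balance the final reference */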
fs/ext4/ialloc.c

@@ -1287,7 +1287,7 @@ extern int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 			   group, used_blks,
 			   ext4_itable_unused_count(sb, gdp));
 		ret = 1;
-		goto out;
+		goto err_out;
 	}
 
 	blk = ext4_inode_table(sb, gdp) + used_blks;
fs/ext4/inode.c (1596 changes)
File diff suppressed because it is too large.
fs/ext4/ioctl.c

@@ -202,8 +202,9 @@ setversion_out:
 		struct super_block *sb = inode->i_sb;
 		int err, err2=0;
 
-		if (!capable(CAP_SYS_RESOURCE))
-			return -EPERM;
+		err = ext4_resize_begin(sb);
+		if (err)
+			return err;
 
 		if (get_user(n_blocks_count, (__u32 __user *)arg))
 			return -EFAULT;

@@ -221,6 +222,7 @@ setversion_out:
 		if (err == 0)
 			err = err2;
 		mnt_drop_write(filp->f_path.mnt);
+		ext4_resize_end(sb);
 
 		return err;
 	}

@@ -271,8 +273,9 @@ mext_out:
 		struct super_block *sb = inode->i_sb;
 		int err, err2=0;
 
-		if (!capable(CAP_SYS_RESOURCE))
-			return -EPERM;
+		err = ext4_resize_begin(sb);
+		if (err)
+			return err;
 
 		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
 				sizeof(input)))

@@ -291,6 +294,7 @@ mext_out:
 		if (err == 0)
 			err = err2;
 		mnt_drop_write(filp->f_path.mnt);
+		ext4_resize_end(sb);
 
 		return err;
 	}
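Both ioctl paths now follow the same bracketing pattern: the raw capable()
check is replaced by ext4_resize_begin(), which performs the permission check
and takes the EXT4_RESIZING bit, and every exit after that point must call
ext4_resize_end(). Schematically (illustrative shape, not verbatim code):

	err = ext4_resize_begin(sb);	/* -EPERM / -EBUSY checks live here */
	if (err)
		return err;
	/* ... do the resize work ... */
	ext4_resize_end(sb);		/* releases the EXT4_RESIZING bit */
	return err;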
fs/ext4/mballoc.c

@@ -75,8 +75,8 @@
  *
  * The inode preallocation space is used looking at the _logical_ start
  * block. If only the logical file block falls within the range of prealloc
- * space we will consume the particular prealloc space. This make sure that
- * that the we have contiguous physical blocks representing the file blocks
+ * space we will consume the particular prealloc space. This makes sure that
+ * we have contiguous physical blocks representing the file blocks
  *
  * The important thing to be noted in case of inode prealloc space is that
  * we don't modify the values associated to inode prealloc space except

@@ -84,7 +84,7 @@
  *
  * If we are not able to find blocks in the inode prealloc space and if we
  * have the group allocation flag set then we look at the locality group
- * prealloc space. These are per CPU prealloc list repreasented as
+ * prealloc space. These are per CPU prealloc list represented as
  *
  * ext4_sb_info.s_locality_groups[smp_processor_id()]
  *

@@ -128,12 +128,13 @@
  * we are doing a group prealloc we try to normalize the request to
  * sbi->s_mb_group_prealloc. Default value of s_mb_group_prealloc is
  * 512 blocks. This can be tuned via
- * /sys/fs/ext4/<partition/mb_group_prealloc. The value is represented in
+ * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
  * terms of number of blocks. If we have mounted the file system with -O
  * stripe=<value> option the group prealloc request is normalized to the
- * stripe value (sbi->s_stripe)
+ * the smallest multiple of the stripe value (sbi->s_stripe) which is
+ * greater than the default mb_group_prealloc.
  *
- * The regular allocator(using the buddy cache) supports few tunables.
+ * The regular allocator (using the buddy cache) supports a few tunables.
  *
  * /sys/fs/ext4/<partition>/mb_min_to_scan
  * /sys/fs/ext4/<partition>/mb_max_to_scan

@@ -152,7 +153,7 @@
  * best extent in the found extents. Searching for the blocks starts with
  * the group specified as the goal value in allocation context via
  * ac_g_ex. Each group is first checked based on the criteria whether it
- * can used for allocation. ext4_mb_good_group explains how the groups are
+ * can be used for allocation. ext4_mb_good_group explains how the groups are
  * checked.
  *
  * Both the prealloc space are getting populated as above. So for the first

@@ -492,10 +493,11 @@ static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
 	b2 = (unsigned char *) bitmap;
 	for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
 		if (b1[i] != b2[i]) {
-			printk(KERN_ERR "corruption in group %u "
-			       "at byte %u(%u): %x in copy != %x "
-			       "on disk/prealloc\n",
-			       e4b->bd_group, i, i * 8, b1[i], b2[i]);
+			ext4_msg(e4b->bd_sb, KERN_ERR,
+				 "corruption in group %u "
+				 "at byte %u(%u): %x in copy != %x "
+				 "on disk/prealloc",
+				 e4b->bd_group, i, i * 8, b1[i], b2[i]);
 			BUG();
 		}
 	}

@@ -1125,7 +1127,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
 	grp = ext4_get_group_info(sb, group);
 
 	e4b->bd_blkbits = sb->s_blocksize_bits;
-	e4b->bd_info = ext4_get_group_info(sb, group);
+	e4b->bd_info = grp;
 	e4b->bd_sb = sb;
 	e4b->bd_group = group;
 	e4b->bd_buddy_page = NULL;

@@ -1281,7 +1283,7 @@ static void mb_clear_bits(void *bm, int cur, int len)
 	}
 }
 
-static void mb_set_bits(void *bm, int cur, int len)
+void ext4_set_bits(void *bm, int cur, int len)
 {
 	__u32 *addr;
 

@@ -1510,7 +1512,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
 	}
 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
 
-	mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
+	ext4_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
 	mb_check_buddy(e4b);
 
 	return ret;

@@ -2223,8 +2225,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 			EXT4_DESC_PER_BLOCK_BITS(sb);
 		meta_group_info = kmalloc(metalen, GFP_KERNEL);
 		if (meta_group_info == NULL) {
-			printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
-			       "buddy group\n");
+			ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate mem "
+				 "for a buddy group");
 			goto exit_meta_group_info;
 		}
 		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =

@@ -2237,7 +2239,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 
 	meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
 	if (meta_group_info[i] == NULL) {
-		printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
+		ext4_msg(sb, KERN_ERR, "EXT4-fs: can't allocate buddy mem");
 		goto exit_group_info;
 	}
 	memset(meta_group_info[i], 0, kmem_cache_size(cachep));

@@ -2279,8 +2281,10 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 
 exit_group_info:
 	/* If a meta_group_info table has been allocated, release it now */
-	if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
+	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
 		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
+		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
+	}
 exit_meta_group_info:
 	return -ENOMEM;
 } /* ext4_mb_add_groupinfo */

@@ -2328,23 +2332,26 @@ static int ext4_mb_init_backend(struct super_block *sb)
 	/* An 8TB filesystem with 64-bit pointers requires a 4096 byte
 	 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
 	 * So a two level scheme suffices for now. */
-	sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
+	sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
 	if (sbi->s_group_info == NULL) {
-		printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
+		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
 		return -ENOMEM;
 	}
 	sbi->s_buddy_cache = new_inode(sb);
 	if (sbi->s_buddy_cache == NULL) {
-		printk(KERN_ERR "EXT4-fs: can't get new inode\n");
+		ext4_msg(sb, KERN_ERR, "can't get new inode");
 		goto err_freesgi;
 	}
-	sbi->s_buddy_cache->i_ino = get_next_ino();
+	/* To avoid potentially colliding with an valid on-disk inode number,
+	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
+	 * not in the inode hash, so it should never be found by iget(), but
+	 * this will avoid confusion if it ever shows up during debugging. */
+	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
 	for (i = 0; i < ngroups; i++) {
 		desc = ext4_get_group_desc(sb, i, NULL);
 		if (desc == NULL) {
-			printk(KERN_ERR
-				"EXT4-fs: can't read descriptor %u\n", i);
+			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
 			goto err_freebuddy;
 		}
 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)

@@ -2362,7 +2369,7 @@ err_freebuddy:
 		kfree(sbi->s_group_info[i]);
 	iput(sbi->s_buddy_cache);
 err_freesgi:
-	kfree(sbi->s_group_info);
+	ext4_kvfree(sbi->s_group_info);
 	return -ENOMEM;
 }
 

@@ -2404,14 +2411,15 @@ static int ext4_groupinfo_create_slab(size_t size)
 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
 					NULL);
 
+	ext4_groupinfo_caches[cache_index] = cachep;
+
 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
 	if (!cachep) {
-		printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
+		printk(KERN_EMERG
+		       "EXT4-fs: no memory for groupinfo slab cache\n");
 		return -ENOMEM;
 	}
 
-	ext4_groupinfo_caches[cache_index] = cachep;
-
 	return 0;
 }
 

@@ -2457,12 +2465,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 		i++;
 	} while (i <= sb->s_blocksize_bits + 1);
 
-	/* init file for buddy data */
-	ret = ext4_mb_init_backend(sb);
-	if (ret != 0) {
-		goto out;
-	}
-
 	spin_lock_init(&sbi->s_md_lock);
 	spin_lock_init(&sbi->s_bal_lock);
 

@@ -2472,6 +2474,18 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
 	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
+	/*
+	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
+	 * to the lowest multiple of s_stripe which is bigger than
+	 * the s_mb_group_prealloc as determined above. We want
+	 * the preallocation size to be an exact multiple of the
+	 * RAID stripe size so that preallocations don't fragment
+	 * the stripes.
+	 */
+	if (sbi->s_stripe > 1) {
+		sbi->s_mb_group_prealloc = roundup(
+			sbi->s_mb_group_prealloc, sbi->s_stripe);
+	}
 
 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
 	if (sbi->s_locality_groups == NULL) {

@@ -2487,6 +2501,12 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 		spin_lock_init(&lg->lg_prealloc_lock);
 	}
 
+	/* init file for buddy data */
+	ret = ext4_mb_init_backend(sb);
+	if (ret != 0) {
+		goto out;
+	}
+
 	if (sbi->s_proc)
 		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
 				 &ext4_mb_seq_groups_fops, sb);
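A worked example of the stripe normalization introduced above (mine, not from
the patch): with the default s_mb_group_prealloc of 512 blocks, a RAID stripe
of 16 blocks leaves the value at roundup(512, 16) = 512, while an odd stripe
of 24 blocks raises it to roundup(512, 24) = 528, the smallest multiple of 24
that is >= 512. Keeping group preallocations stripe-aligned prevents them
from fragmenting the RAID stripes.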
@@ -2544,32 +2564,32 @@ int ext4_mb_release(struct super_block *sb)
 			EXT4_DESC_PER_BLOCK_BITS(sb);
 		for (i = 0; i < num_meta_group_infos; i++)
 			kfree(sbi->s_group_info[i]);
-		kfree(sbi->s_group_info);
+		ext4_kvfree(sbi->s_group_info);
 	}
 	kfree(sbi->s_mb_offsets);
 	kfree(sbi->s_mb_maxs);
 	if (sbi->s_buddy_cache)
 		iput(sbi->s_buddy_cache);
 	if (sbi->s_mb_stats) {
-		printk(KERN_INFO
-		       "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
+		ext4_msg(sb, KERN_INFO,
+		       "mballoc: %u blocks %u reqs (%u success)",
 				atomic_read(&sbi->s_bal_allocated),
 				atomic_read(&sbi->s_bal_reqs),
 				atomic_read(&sbi->s_bal_success));
-		printk(KERN_INFO
-		      "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
-				"%u 2^N hits, %u breaks, %u lost\n",
+		ext4_msg(sb, KERN_INFO,
+		      "mballoc: %u extents scanned, %u goal hits, "
+				"%u 2^N hits, %u breaks, %u lost",
 				atomic_read(&sbi->s_bal_ex_scanned),
 				atomic_read(&sbi->s_bal_goals),
 				atomic_read(&sbi->s_bal_2orders),
 				atomic_read(&sbi->s_bal_breaks),
 				atomic_read(&sbi->s_mb_lost_chunks));
-		printk(KERN_INFO
-		       "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
-				sbi->s_mb_buddies_generated++,
+		ext4_msg(sb, KERN_INFO,
+		       "mballoc: %lu generated and it took %Lu",
+				sbi->s_mb_buddies_generated,
 				sbi->s_mb_generation_time);
-		printk(KERN_INFO
-		       "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
+		ext4_msg(sb, KERN_INFO,
+		       "mballoc: %u preallocated, %u discarded",
 				atomic_read(&sbi->s_mb_preallocated),
 				atomic_read(&sbi->s_mb_discarded));
 	}

@@ -2628,6 +2648,15 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
 		rb_erase(&entry->node, &(db->bb_free_root));
 		mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
 
+		/*
+		 * Clear the trimmed flag for the group so that the next
+		 * ext4_trim_fs can trim it.
+		 * If the volume is mounted with -o discard, online discard
+		 * is supported and the free blocks will be trimmed online.
+		 */
+		if (!test_opt(sb, DISCARD))
+			EXT4_MB_GRP_CLEAR_TRIMMED(db);
+
 		if (!db->bb_free_root.rb_node) {
 			/* No more items in the per group rb tree
 			 * balance refcounts from ext4_mb_free_metadata()

@@ -2771,8 +2800,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		 * We leak some of the blocks here.
 		 */
 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
-		mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
-			    ac->ac_b_ex.fe_len);
+		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+			      ac->ac_b_ex.fe_len);
 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 		if (!err)

@@ -2790,7 +2819,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		}
 	}
 #endif
-	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
+	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+		      ac->ac_b_ex.fe_len);
 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 		ext4_free_blks_set(sb, gdp,

@@ -2830,8 +2860,9 @@ out_err:
 
 /*
  * here we normalize request for locality group
- * Group request are normalized to s_strip size if we set the same via mount
- * option. If not we set it to s_mb_group_prealloc which can be configured via
+ * Group request are normalized to s_mb_group_prealloc, which goes to
+ * s_strip if we set the same via mount option.
+ * s_mb_group_prealloc can be configured via
  * /sys/fs/ext4/<partition>/mb_group_prealloc
  *
  * XXX: should we try to preallocate more than the group has now?

@@ -2842,10 +2873,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
 	struct ext4_locality_group *lg = ac->ac_lg;
 
 	BUG_ON(lg == NULL);
-	if (EXT4_SB(sb)->s_stripe)
-		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
-	else
-		ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
+	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
 	mb_debug(1, "#%u: goal %u blocks for locality group\n",
 		current->pid, ac->ac_g_ex.fe_len);
 }

@@ -3001,9 +3029,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 
 	if (start + size <= ac->ac_o_ex.fe_logical &&
 			start > ac->ac_o_ex.fe_logical) {
-		printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
-			(unsigned long) start, (unsigned long) size,
-			(unsigned long) ac->ac_o_ex.fe_logical);
+		ext4_msg(ac->ac_sb, KERN_ERR,
+			 "start %lu, size %lu, fe_logical %lu",
+			 (unsigned long) start, (unsigned long) size,
+			 (unsigned long) ac->ac_o_ex.fe_logical);
 	}
 	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
 			start > ac->ac_o_ex.fe_logical);

@@ -3262,7 +3291,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 
 	while (n) {
 		entry = rb_entry(n, struct ext4_free_data, node);
-		mb_set_bits(bitmap, entry->start_blk, entry->count);
+		ext4_set_bits(bitmap, entry->start_blk, entry->count);
 		n = rb_next(n);
 	}
 	return;

@@ -3304,7 +3333,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 		if (unlikely(len == 0))
 			continue;
 		BUG_ON(groupnr != group);
-		mb_set_bits(bitmap, start, len);
+		ext4_set_bits(bitmap, start, len);
 		preallocated += len;
 		count++;
 	}

@@ -3584,10 +3613,11 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
 		bit = next + 1;
 	}
 	if (free != pa->pa_free) {
-		printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
-			pa, (unsigned long) pa->pa_lstart,
-			(unsigned long) pa->pa_pstart,
-			(unsigned long) pa->pa_len);
+		ext4_msg(e4b->bd_sb, KERN_CRIT,
+			 "pa %p: logic %lu, phys. %lu, len %lu",
+			 pa, (unsigned long) pa->pa_lstart,
+			 (unsigned long) pa->pa_pstart,
+			 (unsigned long) pa->pa_len);
 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
 					free, pa->pa_free);
 		/*

@@ -3775,7 +3805,8 @@ repeat:
 			 * use preallocation while we're discarding it */
 			spin_unlock(&pa->pa_lock);
 			spin_unlock(&ei->i_prealloc_lock);
-			printk(KERN_ERR "uh-oh! used pa while discarding\n");
+			ext4_msg(sb, KERN_ERR,
+				 "uh-oh! used pa while discarding");
 			WARN_ON(1);
 			schedule_timeout_uninterruptible(HZ);
 			goto repeat;

@@ -3852,12 +3883,13 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
 			(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
 		return;
 
-	printk(KERN_ERR "EXT4-fs: Can't allocate:"
-		" Allocation context details:\n");
-	printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
+	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: Can't allocate:"
+			" Allocation context details:");
+	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: status %d flags %d",
 		ac->ac_status, ac->ac_flags);
-	printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
-		"best %lu/%lu/%lu@%lu cr %d\n",
+	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: orig %lu/%lu/%lu@%lu, "
+			"goal %lu/%lu/%lu@%lu, "
+			"best %lu/%lu/%lu@%lu cr %d",
 		(unsigned long)ac->ac_o_ex.fe_group,
 		(unsigned long)ac->ac_o_ex.fe_start,
 		(unsigned long)ac->ac_o_ex.fe_len,

@@ -3871,9 +3903,9 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
 		(unsigned long)ac->ac_b_ex.fe_len,
 		(unsigned long)ac->ac_b_ex.fe_logical,
 		(int)ac->ac_criteria);
-	printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
-		ac->ac_found);
-	printk(KERN_ERR "EXT4-fs: groups: \n");
+	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: %lu scanned, %d found",
+		 ac->ac_ex_scanned, ac->ac_found);
+	ext4_msg(ac->ac_sb, KERN_ERR, "EXT4-fs: groups: ");
 	ngroups = ext4_get_groups_count(sb);
 	for (i = 0; i < ngroups; i++) {
 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);

@@ -4637,7 +4669,7 @@ do_more:
 	}
 	ext4_mark_super_dirty(sb);
 error_return:
-	if (freed)
+	if (freed && !(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
 		dquot_free_block(inode, freed);
 	brelse(bitmap_bh);
 	ext4_std_error(sb, err);

@@ -4645,7 +4677,7 @@ error_return:
 }
 
 /**
- * ext4_add_groupblocks() -- Add given blocks to an existing group
+ * ext4_group_add_blocks() -- Add given blocks to an existing group
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physcial block to add to the block group

@@ -4653,7 +4685,7 @@ error_return:
 *
 * This marks the blocks as free in the bitmap and buddy.
 */
-void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count)
{
	struct buffer_head *bitmap_bh = NULL;

@@ -4666,25 +4698,35 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
 	struct ext4_buddy e4b;
 	int err = 0, ret, blk_free_count;
 	ext4_grpblk_t blocks_freed;
+	struct ext4_group_info *grp;
 
 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
 
+	if (count == 0)
+		return 0;
+
 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
+	grp = ext4_get_group_info(sb, block_group);
 	/*
 	 * Check to see if we are freeing blocks across a group
 	 * boundary.
 	 */
-	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb))
+	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
+		ext4_warning(sb, "too much blocks added to group %u\n",
+			     block_group);
+		err = -EINVAL;
 		goto error_return;
+	}
 
 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
-	if (!bitmap_bh)
+	if (!bitmap_bh) {
+		err = -EIO;
 		goto error_return;
+	}
 
 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
-	if (!desc)
+	if (!desc) {
+		err = -EIO;
 		goto error_return;
+	}
 
 	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
 	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||

@@ -4694,6 +4736,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
 		ext4_error(sb, "Adding blocks in system zones - "
 			   "Block = %llu, count = %lu",
 			   block, count);
+		err = -EINVAL;
 		goto error_return;
 	}
 

@@ -4762,7 +4805,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
 error_return:
 	brelse(bitmap_bh);
 	ext4_std_error(sb, err);
-	return;
+	return err;
 }
 
 /**

@@ -4782,6 +4825,8 @@ static void ext4_trim_extent(struct super_block *sb, int start, int count,
 {
 	struct ext4_free_extent ex;
 
+	trace_ext4_trim_extent(sb, group, start, count);
+
 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
 
 	ex.fe_start = start;

@@ -4802,7 +4847,7 @@ static void ext4_trim_extent(struct super_block *sb, int start, int count,
 /**
  * ext4_trim_all_free -- function to trim all free space in alloc. group
  * @sb:			super block for file system
- * @e4b:		ext4 buddy
+ * @group:		group to be trimmed
  * @start:		first group block to examine
  * @max:		last group block to examine
  * @minblocks:		minimum extent block count

@@ -4823,10 +4868,12 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 		   ext4_grpblk_t minblocks)
 {
 	void *bitmap;
-	ext4_grpblk_t next, count = 0;
+	ext4_grpblk_t next, count = 0, free_count = 0;
 	struct ext4_buddy e4b;
 	int ret;
 
+	trace_ext4_trim_all_free(sb, group, start, max);
+
 	ret = ext4_mb_load_buddy(sb, group, &e4b);
 	if (ret) {
 		ext4_error(sb, "Error in loading buddy "

@@ -4836,6 +4883,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 	bitmap = e4b.bd_bitmap;
 
 	ext4_lock_group(sb, group);
+	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
+	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
+		goto out;
+
 	start = (e4b.bd_info->bb_first_free > start) ?
 		e4b.bd_info->bb_first_free : start;
 

@@ -4850,6 +4901,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 					 next - start, group, &e4b);
 			count += next - start;
 		}
+		free_count += next - start;
 		start = next + 1;
 
 		if (fatal_signal_pending(current)) {

@@ -4863,9 +4915,13 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 			ext4_lock_group(sb, group);
 		}
 
-		if ((e4b.bd_info->bb_free - count) < minblocks)
+		if ((e4b.bd_info->bb_free - free_count) < minblocks)
 			break;
 	}
+
+	if (!ret)
+		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
+out:
 	ext4_unlock_group(sb, group);
 	ext4_mb_unload_buddy(&e4b);
 

@@ -4904,6 +4960,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 
 	if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
 		return -EINVAL;
+	if (start + len <= first_data_blk)
+		goto out;
 	if (start < first_data_blk) {
 		len -= first_data_blk - start;
 		start = first_data_blk;

@@ -4952,5 +5010,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 	}
 	range->len = trimmed * sb->s_blocksize;
 
+	if (!ret)
+		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
+
+out:
 	return ret;
 }
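One subtlety worth calling out in the ext4_free_blocks() change above: blocks
allocated out of the delalloc reserve never had quota claimed for them, so
freeing them on the error path must not credit quota either. Callers signal
this with the new flag; the shape of the call (see the extents.c hunk for the
real site):

	int fb_flags = (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) ?
			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
	ext4_free_blocks(handle, inode, NULL, block, count, fb_flags);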
fs/ext4/mballoc.h

@@ -187,7 +187,6 @@ struct ext4_allocation_context {
 	__u16 ac_flags;		/* allocation hints */
 	__u8 ac_status;
 	__u8 ac_criteria;
-	__u8 ac_repeats;
 	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
 				 * N > 0, the field stores N, otherwise 0 */
 	__u8 ac_op;		/* operation, for history only */
fs/ext4/namei.c

@@ -289,7 +289,7 @@ static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_ent
 				while (len--) printk("%c", *name++);
 				ext4fs_dirhash(de->name, de->name_len, &h);
 				printk(":%x.%u ", h.hash,
-				       ((char *) de - base));
+				       (unsigned) ((char *) de - base));
 			}
 			space += EXT4_DIR_REC_LEN(de->name_len);
 			names++;

@@ -1013,7 +1013,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
 
 	*err = -ENOENT;
 errout:
-	dxtrace(printk(KERN_DEBUG "%s not found\n", name));
+	dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
 	dx_release (frames);
 	return NULL;
 }

@@ -1985,18 +1985,11 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
 	if (!list_empty(&EXT4_I(inode)->i_orphan))
 		goto out_unlock;
 
-	/* Orphan handling is only valid for files with data blocks
-	 * being truncated, or files being unlinked. */
-
-	/* @@@ FIXME: Observation from aviro:
-	 * I think I can trigger J_ASSERT in ext4_orphan_add().  We block
-	 * here (on s_orphan_lock), so race with ext4_link() which might bump
-	 * ->i_nlink. For, say it, character device. Not a regular file,
-	 * not a directory, not a symlink and ->i_nlink > 0.
-	 *
-	 * tytso, 4/25/2009: I'm not sure how that could happen;
-	 * shouldn't the fs core protect us from these sort of
-	 * unlink()/link() races?
+	/*
+	 * Orphan handling is only valid for files with data blocks
+	 * being truncated, or files being unlinked. Note that we either
+	 * hold i_mutex, or the inode can not be referenced from outside,
+	 * so i_nlink should not be bumped due to race
 	 */
 	J_ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 		  S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
fs/ext4/page-io.c

@@ -285,11 +285,7 @@ static int io_submit_init(struct ext4_io_submit *io,
 	io_end = ext4_init_io_end(inode, GFP_NOFS);
 	if (!io_end)
 		return -ENOMEM;
-	do {
-		bio = bio_alloc(GFP_NOIO, nvecs);
-		nvecs >>= 1;
-	} while (bio == NULL);
-
+	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_private = io->io_end = io_end;
fs/ext4/resize.c (199 changes)

@@ -16,6 +16,35 @@
 
 #include "ext4_jbd2.h"
 
+int ext4_resize_begin(struct super_block *sb)
+{
+	int ret = 0;
+
+	if (!capable(CAP_SYS_RESOURCE))
+		return -EPERM;
+
+	/*
+	 * We are not allowed to do online-resizing on a filesystem mounted
+	 * with error, because it can destroy the filesystem easily.
+	 */
+	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+		ext4_warning(sb, "There are errors in the filesystem, "
+			     "so online resizing is not allowed\n");
+		return -EPERM;
+	}
+
+	if (test_and_set_bit_lock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags))
+		ret = -EBUSY;
+
+	return ret;
+}
+
+void ext4_resize_end(struct super_block *sb)
+{
+	clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
+	smp_mb__after_clear_bit();
+}
+
 #define outside(b, first, last)	((b) < (first) || (b) >= (last))
 #define inside(b, first, last)	((b) >= (first) && (b) < (last))
 
@ -118,10 +147,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
|
|||
brelse(bh);
|
||||
bh = ERR_PTR(err);
|
||||
} else {
|
||||
lock_buffer(bh);
|
||||
memset(bh->b_data, 0, sb->s_blocksize);
|
||||
set_buffer_uptodate(bh);
|
||||
unlock_buffer(bh);
|
||||
}
|
||||
|
||||
return bh;
|
||||
|
@ -132,8 +159,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
|
|||
* If that fails, restart the transaction & regain write access for the
|
||||
* buffer head which is used for block_bitmap modifications.
|
||||
*/
|
||||
static int extend_or_restart_transaction(handle_t *handle, int thresh,
|
||||
struct buffer_head *bh)
|
||||
static int extend_or_restart_transaction(handle_t *handle, int thresh)
|
||||
{
|
||||
int err;
|
||||
|
||||
|
@ -144,9 +170,8 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh,
|
|||
if (err < 0)
|
||||
return err;
|
||||
if (err) {
|
||||
if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
|
||||
return err;
|
||||
if ((err = ext4_journal_get_write_access(handle, bh)))
|
||||
err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -181,21 +206,7 @@ static int setup_new_group_blocks(struct super_block *sb,
|
|||
if (IS_ERR(handle))
|
||||
return PTR_ERR(handle);
|
||||
|
||||
mutex_lock(&sbi->s_resize_lock);
|
||||
if (input->group != sbi->s_groups_count) {
|
||||
err = -EBUSY;
|
||||
goto exit_journal;
|
||||
}
|
||||
|
||||
if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
|
||||
err = PTR_ERR(bh);
|
||||
goto exit_journal;
|
||||
}
|
||||
|
||||
if (ext4_bg_has_super(sb, input->group)) {
|
||||
ext4_debug("mark backup superblock %#04llx (+0)\n", start);
|
||||
ext4_set_bit(0, bh->b_data);
|
||||
}
|
||||
BUG_ON(input->group != sbi->s_groups_count);
|
||||
|
||||
/* Copy all of the GDT blocks into the backup in this group */
|
||||
for (i = 0, bit = 1, block = start + 1;
|
||||
|
@ -203,29 +214,26 @@ static int setup_new_group_blocks(struct super_block *sb,
|
|||
struct buffer_head *gdb;
|
||||
|
||||
ext4_debug("update backup group %#04llx (+%d)\n", block, bit);
|
||||
|
||||
if ((err = extend_or_restart_transaction(handle, 1, bh)))
|
||||
goto exit_bh;
|
||||
err = extend_or_restart_transaction(handle, 1);
|
||||
if (err)
|
||||
goto exit_journal;
|
||||
|
||||
gdb = sb_getblk(sb, block);
|
||||
if (!gdb) {
|
||||
err = -EIO;
|
||||
goto exit_bh;
|
||||
goto exit_journal;
|
||||
}
|
||||
if ((err = ext4_journal_get_write_access(handle, gdb))) {
|
||||
brelse(gdb);
|
||||
goto exit_bh;
|
||||
goto exit_journal;
|
||||
}
|
||||
lock_buffer(gdb);
|
||||
memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
|
||||
set_buffer_uptodate(gdb);
|
||||
unlock_buffer(gdb);
|
||||
err = ext4_handle_dirty_metadata(handle, NULL, gdb);
|
||||
if (unlikely(err)) {
|
||||
brelse(gdb);
|
||||
goto exit_bh;
|
||||
goto exit_journal;
|
||||
}
|
||||
ext4_set_bit(bit, bh->b_data);
|
||||
brelse(gdb);
|
||||
}
|
||||
|
||||
|
@ -235,9 +243,22 @@ static int setup_new_group_blocks(struct super_block *sb,
|
|||
err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
|
||||
GFP_NOFS);
|
||||
if (err)
|
||||
goto exit_bh;
|
||||
for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++)
|
||||
ext4_set_bit(bit, bh->b_data);
|
||||
goto exit_journal;
|
||||
|
||||
err = extend_or_restart_transaction(handle, 2);
|
||||
if (err)
|
||||
goto exit_journal;
|
||||
|
||||
bh = bclean(handle, sb, input->block_bitmap);
|
||||
if (IS_ERR(bh)) {
|
||||
err = PTR_ERR(bh);
|
||||
goto exit_journal;
|
||||
}
|
||||
|
||||
if (ext4_bg_has_super(sb, input->group)) {
|
||||
ext4_debug("mark backup group tables %#04llx (+0)\n", start);
|
||||
ext4_set_bits(bh->b_data, 0, gdblocks + reserved_gdb + 1);
|
||||
}
|
||||
|
||||
ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
|
||||
input->block_bitmap - start);
|
||||
|
@@ -253,12 +274,9 @@ static int setup_new_group_blocks(struct super_block *sb,
 	err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
 	if (err)
 		goto exit_bh;
-	for (i = 0, bit = input->inode_table - start;
-	     i < sbi->s_itb_per_group; i++, bit++)
-		ext4_set_bit(bit, bh->b_data);
+	ext4_set_bits(bh->b_data, input->inode_table - start,
+		      sbi->s_itb_per_group);
 
-	if ((err = extend_or_restart_transaction(handle, 2, bh)))
-		goto exit_bh;
-
 	ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
 			     bh->b_data);
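Both open-coded per-bit loops collapse into single ext4_set_bits() calls. A standalone model of such a range helper over a byte-addressed bitmap; the name and the bit-at-a-time body are illustrative only, since the real helper can set whole words in the middle of the run.

#include <stdio.h>
#include <string.h>

static void set_bits(unsigned char *map, unsigned int start,
		     unsigned int len)
{
	unsigned int i;

	for (i = start; i < start + len; i++)
		map[i >> 3] |= 1u << (i & 7);
}

int main(void)
{
	unsigned char map[8];
	int i;

	memset(map, 0, sizeof(map));
	set_bits(map, 5, 11);	/* e.g. inode table at bit 5, 11 blocks */
	for (i = 0; i < 8; i++)
		printf("%02x ", map[i]);
	printf("\n");
	return 0;
}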
@@ -285,7 +303,6 @@ exit_bh:
 	brelse(bh);
 
 exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
 	if ((err2 = ext4_journal_stop(handle)) && !err)
 		err = err2;
 
@@ -377,15 +394,15 @@ static int verify_reserved_gdb(struct super_block *sb,
  * fail once we start modifying the data on disk, because JBD has no rollback.
  */
 static int add_new_gdb(handle_t *handle, struct inode *inode,
-		       struct ext4_new_group_data *input,
-		       struct buffer_head **primary)
+		       ext4_group_t group)
 {
 	struct super_block *sb = inode->i_sb;
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-	unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
+	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
 	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
 	struct buffer_head **o_group_desc, **n_group_desc;
 	struct buffer_head *dind;
+	struct buffer_head *gdb_bh;
 	int gdbackups;
 	struct ext4_iloc iloc;
 	__le32 *data;
@@ -408,11 +425,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 		return -EPERM;
 	}
 
-	*primary = sb_bread(sb, gdblock);
-	if (!*primary)
+	gdb_bh = sb_bread(sb, gdblock);
+	if (!gdb_bh)
 		return -EIO;
 
-	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
+	gdbackups = verify_reserved_gdb(sb, gdb_bh);
+	if (gdbackups < 0) {
 		err = gdbackups;
 		goto exit_bh;
 	}

@@ -427,7 +445,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	data = (__le32 *)dind->b_data;
 	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
 		ext4_warning(sb, "new group %u GDT block %llu not reserved",
-			     input->group, gdblock);
+			     group, gdblock);
 		err = -EINVAL;
 		goto exit_dind;
 	}

@@ -436,7 +454,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	if (unlikely(err))
 		goto exit_dind;
 
-	err = ext4_journal_get_write_access(handle, *primary);
+	err = ext4_journal_get_write_access(handle, gdb_bh);
 	if (unlikely(err))
 		goto exit_sbh;
 
@@ -449,12 +467,13 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	if (unlikely(err))
 		goto exit_dindj;
 
-	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
-			GFP_NOFS);
+	n_group_desc = ext4_kvmalloc((gdb_num + 1) *
+				     sizeof(struct buffer_head *),
+				     GFP_NOFS);
 	if (!n_group_desc) {
 		err = -ENOMEM;
-		ext4_warning(sb,
-			      "not enough memory for %lu groups", gdb_num + 1);
+		ext4_warning(sb, "not enough memory for %lu groups",
+			     gdb_num + 1);
 		goto exit_inode;
 	}
 

@@ -475,8 +494,8 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	}
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
 	ext4_mark_iloc_dirty(handle, inode, &iloc);
-	memset((*primary)->b_data, 0, sb->s_blocksize);
-	err = ext4_handle_dirty_metadata(handle, NULL, *primary);
+	memset(gdb_bh->b_data, 0, sb->s_blocksize);
+	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
 	if (unlikely(err)) {
 		ext4_std_error(sb, err);
 		goto exit_inode;

@@ -486,10 +505,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	o_group_desc = EXT4_SB(sb)->s_group_desc;
 	memcpy(n_group_desc, o_group_desc,
 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
-	n_group_desc[gdb_num] = *primary;
+	n_group_desc[gdb_num] = gdb_bh;
 	EXT4_SB(sb)->s_group_desc = n_group_desc;
 	EXT4_SB(sb)->s_gdb_count++;
-	kfree(o_group_desc);
+	ext4_kvfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
 	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
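add_new_gdb() grows s_group_desc by allocating a one-slot-larger array, copying the old pointers, filling the new slot, publishing the new array, and only then freeing the old one. A userspace model of that copy-and-swap sequence, with an int standing in for a descriptor buffer; all names here are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sb_info {
	int **group_desc;	/* published array of descriptor pointers */
	size_t gdb_count;
};

static int grow_group_desc(struct sb_info *sbi, int *new_desc)
{
	int **n = malloc((sbi->gdb_count + 1) * sizeof(*n));
	int **o = sbi->group_desc;

	if (!n)
		return -1;
	memcpy(n, o, sbi->gdb_count * sizeof(*n));
	n[sbi->gdb_count] = new_desc;	/* fill the new slot first */
	sbi->group_desc = n;		/* then publish the new array */
	sbi->gdb_count++;
	free(o);			/* old array no longer reachable */
	return 0;
}

int main(void)
{
	int d0 = 0, d1 = 1;
	struct sb_info sbi = { malloc(sizeof(int *)), 1 };

	sbi.group_desc[0] = &d0;
	if (grow_group_desc(&sbi, &d1))
		return 1;
	printf("%zu descriptors, last=%d\n", sbi.gdb_count,
	       *sbi.group_desc[1]);
	free(sbi.group_desc);
	return 0;
}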
@@ -499,6 +518,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	return err;
 
 exit_inode:
+	ext4_kvfree(n_group_desc);
 	/* ext4_handle_release_buffer(handle, iloc.bh); */
 	brelse(iloc.bh);
 exit_dindj:

@@ -508,7 +528,7 @@ exit_sbh:
 exit_dind:
 	brelse(dind);
 exit_bh:
-	brelse(*primary);
+	brelse(gdb_bh);
 
 	ext4_debug("leaving with error %d\n", err);
 	return err;

@@ -528,7 +548,7 @@ exit_bh:
  * backup GDT blocks are stored in their reserved primary GDT block.
  */
 static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
-			      struct ext4_new_group_data *input)
+			      ext4_group_t group)
 {
 	struct super_block *sb = inode->i_sb;
 	int reserved_gdb =le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
@@ -599,7 +619,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
 	 * Finally we can add each of the reserved backup GDT blocks from
 	 * the new group to its reserved primary GDT block.
 	 */
-	blk = input->group * EXT4_BLOCKS_PER_GROUP(sb);
+	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
 	for (i = 0; i < reserved_gdb; i++) {
 		int err2;
 		data = (__le32 *)primary[i]->b_data;
@@ -799,13 +819,6 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 		goto exit_put;
 	}
 
-	mutex_lock(&sbi->s_resize_lock);
-	if (input->group != sbi->s_groups_count) {
-		ext4_warning(sb, "multiple resizers run on filesystem!");
-		err = -EBUSY;
-		goto exit_journal;
-	}
-
 	if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
 		goto exit_journal;
 
@@ -820,16 +833,25 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 		if ((err = ext4_journal_get_write_access(handle, primary)))
 			goto exit_journal;
 
-		if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
-		    (err = reserve_backup_gdb(handle, inode, input)))
+		if (reserved_gdb && ext4_bg_num_gdb(sb, input->group)) {
+			err = reserve_backup_gdb(handle, inode, input->group);
+			if (err)
+				goto exit_journal;
+		}
+	} else {
+		/*
+		 * Note that we can access new group descriptor block safely
+		 * only if add_new_gdb() succeeds.
+		 */
+		err = add_new_gdb(handle, inode, input->group);
+		if (err)
 			goto exit_journal;
-	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
-		goto exit_journal;
+		primary = sbi->s_group_desc[gdb_num];
 	}
 
 	/*
 	 * OK, now we've set up the new group.  Time to make it active.
 	 *
 	 * We do not lock all allocations via s_resize_lock
 	 * so we have to be safe wrt. concurrent accesses the group
 	 * data.  So we need to be careful to set all of the relevant
 	 * group descriptor data etc. *before* we enable the group.
@@ -886,13 +908,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 	 *
 	 * The precise rules we use are:
 	 *
-	 * * Writers of s_groups_count *must* hold s_resize_lock
-	 * AND
 	 * * Writers must perform a smp_wmb() after updating all dependent
 	 *   data and before modifying the groups count
 	 *
-	 * * Readers must hold s_resize_lock over the access
-	 * OR
 	 * * Readers must perform an smp_rmb() after reading the groups count
 	 *   and before reading any dependent data.
 	 *
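With the lock-based alternative gone, the rule is a plain publish/consume barrier pairing: the writer stores all dependent data, issues a write barrier, then bumps the count; the reader loads the count, issues a read barrier, then dereferences. A C11 sketch of the same pairing, with atomic fences approximating smp_wmb()/smp_rmb() and made-up variable names.

#include <stdatomic.h>
#include <stdio.h>

static int group_data[16];		/* the dependent per-group data */
static atomic_int groups_count;		/* the published counter */

void writer_add_group(int idx, int value)
{
	group_data[idx] = value;			/* dependent data first */
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */
	atomic_store_explicit(&groups_count, idx + 1,
			      memory_order_relaxed);
}

int reader_last_group(void)
{
	int n = atomic_load_explicit(&groups_count, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* ~smp_rmb() */
	return n ? group_data[n - 1] : -1;
}

int main(void)
{
	/* single-threaded demo; the ordering matters under concurrency */
	writer_add_group(0, 42);
	printf("%d\n", reader_last_group());
	return 0;
}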
@@ -937,10 +955,9 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 	ext4_handle_dirty_super(handle, sb);
 
 exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
 	if ((err2 = ext4_journal_stop(handle)) && !err)
 		err = err2;
-	if (!err) {
+	if (!err && primary) {
 		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
 			       sizeof(struct ext4_super_block));
 		update_backups(sb, primary->b_blocknr, primary->b_data,
@@ -969,16 +986,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 	ext4_grpblk_t add;
 	struct buffer_head *bh;
 	handle_t *handle;
-	int err;
+	int err, err2;
 	ext4_group_t group;
 
-	/* We don't need to worry about locking wrt other resizers just
-	 * yet: we're going to revalidate es->s_blocks_count after
-	 * taking the s_resize_lock below. */
 	o_blocks_count = ext4_blocks_count(es);
 
 	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT4-fs: extending last group from %llu uto %llu blocks\n",
+		printk(KERN_DEBUG "EXT4-fs: extending last group from %llu to %llu blocks\n",
 		       o_blocks_count, n_blocks_count);
 
 	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
@@ -995,7 +1009,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 
 	if (n_blocks_count < o_blocks_count) {
 		ext4_warning(sb, "can't shrink FS - resize aborted");
-		return -EBUSY;
+		return -EINVAL;
 	}
 
 	/* Handle the remaining blocks in the last group only. */
@@ -1038,32 +1052,25 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 		goto exit_put;
 	}
 
-	mutex_lock(&EXT4_SB(sb)->s_resize_lock);
-	if (o_blocks_count != ext4_blocks_count(es)) {
-		ext4_warning(sb, "multiple resizers run on filesystem!");
-		mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
-		ext4_journal_stop(handle);
-		err = -EBUSY;
-		goto exit_put;
-	}
-
 	if ((err = ext4_journal_get_write_access(handle,
 						 EXT4_SB(sb)->s_sbh))) {
 		ext4_warning(sb, "error %d on journal write access", err);
-		mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
 		ext4_journal_stop(handle);
 		goto exit_put;
 	}
 	ext4_blocks_count_set(es, o_blocks_count + add);
-	mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
 	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
 		   o_blocks_count + add);
-	ext4_add_groupblocks(handle, sb, o_blocks_count, add);
+	/* We add the blocks to the bitmap and set the group need init bit */
+	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
 	ext4_handle_dirty_super(handle, sb);
 	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
 		   o_blocks_count + add);
-	if ((err = ext4_journal_stop(handle)))
+	err2 = ext4_journal_stop(handle);
+	if (!err && err2)
+		err = err2;
+
+	if (err)
 		goto exit_put;
 
 	if (test_opt(sb, DEBUG))
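The rewritten tail keeps the first error: the journal must be stopped even when ext4_group_add_blocks() failed, but the stop's own error must not mask the earlier one. The err/err2 idiom in isolation, with stand-in functions:

#include <stdio.h>

static int do_work(void) { return 0; }
static int finish(void)  { return -5; }

int main(void)
{
	int err, err2;

	err = do_work();
	err2 = finish();	/* must run even if do_work() failed */
	if (!err && err2)
		err = err2;

	printf("result: %d\n", err);	/* -5: finish()'s error is kept */
	return 0;
}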
fs/ext4/super.c
@@ -110,6 +110,35 @@ static struct file_system_type ext3_fs_type = {
 #define IS_EXT3_SB(sb) (0)
 #endif
 
+void *ext4_kvmalloc(size_t size, gfp_t flags)
+{
+	void *ret;
+
+	ret = kmalloc(size, flags);
+	if (!ret)
+		ret = __vmalloc(size, flags, PAGE_KERNEL);
+	return ret;
+}
+
+void *ext4_kvzalloc(size_t size, gfp_t flags)
+{
+	void *ret;
+
+	ret = kzalloc(size, flags);
+	if (!ret)
+		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
+	return ret;
+}
+
+void ext4_kvfree(void *ptr)
+{
+	if (is_vmalloc_addr(ptr))
+		vfree(ptr);
+	else
+		kfree(ptr);
+
+}
+
 ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
 			       struct ext4_group_desc *bg)
 {
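ext4_kvmalloc() tries the slab fast path and falls back to vmalloc() when the page allocator cannot find enough contiguous memory, and ext4_kvfree() picks the matching free routine via is_vmalloc_addr(). A runnable userspace analog of the same pattern, with malloc() standing in for kmalloc() and mmap() for vmalloc(); every name and the size limit below are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define SMALL_LIMIT (128 * 1024)	/* pretend the fast path fails above this */

struct vbuf { void *p; size_t len; int mapped; };

static struct vbuf xkvmalloc(size_t len)
{
	struct vbuf b = { NULL, len, 0 };

	if (len <= SMALL_LIMIT)
		b.p = malloc(len);		/* fast path: "kmalloc" */
	if (!b.p) {				/* fallback: "vmalloc" */
		b.p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (b.p == MAP_FAILED)
			b.p = NULL;
		else
			b.mapped = 1;
	}
	return b;
}

static void xkvfree(struct vbuf *b)
{
	/* ext4_kvfree() infers the allocator with is_vmalloc_addr();
	 * this sketch carries an explicit flag instead. */
	if (b->mapped)
		munmap(b->p, b->len);
	else
		free(b->p);
	b->p = NULL;
}

int main(void)
{
	struct vbuf b = xkvmalloc(1 << 20);	/* too big: takes the mmap path */

	if (!b.p)
		return 1;
	memset(b.p, 0, b.len);
	printf("mapped=%d\n", b.mapped);
	xkvfree(&b);
	return 0;
}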
@@ -269,6 +298,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
 	journal_t *journal;
 	handle_t *handle;
 
+	trace_ext4_journal_start(sb, nblocks, _RET_IP_);
 	if (sb->s_flags & MS_RDONLY)
 		return ERR_PTR(-EROFS);
 
@@ -789,11 +819,8 @@ static void ext4_put_super(struct super_block *sb)
 
 	for (i = 0; i < sbi->s_gdb_count; i++)
 		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
-	if (is_vmalloc_addr(sbi->s_flex_groups))
-		vfree(sbi->s_flex_groups);
-	else
-		kfree(sbi->s_flex_groups);
+	ext4_kvfree(sbi->s_group_desc);
+	ext4_kvfree(sbi->s_flex_groups);
 	percpu_counter_destroy(&sbi->s_freeblocks_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -1976,15 +2003,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
 			((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
 			      EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
 	size = flex_group_count * sizeof(struct flex_groups);
-	sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
+	sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
 	if (sbi->s_flex_groups == NULL) {
-		sbi->s_flex_groups = vzalloc(size);
-		if (sbi->s_flex_groups == NULL) {
-			ext4_msg(sb, KERN_ERR,
-				 "not enough memory for %u flex groups",
-				 flex_group_count);
-			goto failed;
-		}
+		ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
+			 flex_group_count);
+		goto failed;
 	}
 
 	for (i = 0; i < sbi->s_groups_count; i++) {
@@ -2383,17 +2406,25 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
 	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
 	unsigned long stripe_width =
			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
+	int ret;
 
 	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
-		return sbi->s_stripe;
-
-	if (stripe_width <= sbi->s_blocks_per_group)
-		return stripe_width;
-
-	if (stride <= sbi->s_blocks_per_group)
-		return stride;
-
-	return 0;
+		ret = sbi->s_stripe;
+	else if (stripe_width <= sbi->s_blocks_per_group)
+		ret = stripe_width;
+	else if (stride <= sbi->s_blocks_per_group)
+		ret = stride;
+	else
+		ret = 0;
+
+	/*
+	 * If the stripe width is 1, this makes no sense and
+	 * we set it to 0 to turn off stripe handling code.
+	 */
+	if (ret <= 1)
+		ret = 0;
+
+	return ret;
 }
 
 /* sysfs supprt */
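The refactored helper computes a single candidate (mount option first, then RAID stripe width, then stride) and applies one sanity clamp at the end instead of returning early from each branch. The same logic extracted into a standalone function with simplified parameters, for experimentation:

#include <stdio.h>

static unsigned long get_stripe_size(unsigned long opt_stripe,
				     unsigned long stripe_width,
				     unsigned long stride,
				     unsigned long blocks_per_group)
{
	unsigned long ret;

	if (opt_stripe && opt_stripe <= blocks_per_group)
		ret = opt_stripe;
	else if (stripe_width <= blocks_per_group)
		ret = stripe_width;
	else if (stride <= blocks_per_group)
		ret = stride;
	else
		ret = 0;

	if (ret <= 1)		/* a stripe of 0 or 1 is meaningless: disable */
		ret = 0;
	return ret;
}

int main(void)
{
	printf("%lu\n", get_stripe_size(0, 1, 16, 32768));	/* 0, not 1 */
	printf("%lu\n", get_stripe_size(0, 65536, 16, 32768));	/* 16: stride */
	return 0;
}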
@@ -3408,8 +3439,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
 		   EXT4_DESC_PER_BLOCK(sb);
-	sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
-				    GFP_KERNEL);
+	sbi->s_group_desc = ext4_kvmalloc(db_count *
+					  sizeof(struct buffer_head *),
+					  GFP_KERNEL);
 	if (sbi->s_group_desc == NULL) {
 		ext4_msg(sb, KERN_ERR, "not enough memory");
 		goto failed_mount;
@@ -3491,7 +3523,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
 	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
 	mutex_init(&sbi->s_orphan_lock);
-	mutex_init(&sbi->s_resize_lock);
+	sbi->s_resize_flags = 0;
 
 	sb->s_root = NULL;
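The per-filesystem resize mutex gives way to a flags word; elsewhere in this series the resize paths serialize through an atomic test-and-set bit on it, so a second resizer backs off with EBUSY instead of blocking. A C11 sketch of that exclusion shape, using atomic_flag in place of the kernel's test_and_set_bit(); the function names are invented.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resizing = ATOMIC_FLAG_INIT;

static int resize_begin(void)
{
	if (atomic_flag_test_and_set(&resizing))
		return -1;	/* EBUSY: another resizer is already running */
	return 0;
}

static void resize_end(void)
{
	atomic_flag_clear(&resizing);
}

int main(void)
{
	printf("first:  %d\n", resize_begin());	/* 0 */
	printf("second: %d\n", resize_begin());	/* -1, flag still held */
	resize_end();
	printf("third:  %d\n", resize_begin());	/* 0 again */
	return 0;
}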
@@ -3741,12 +3773,8 @@ failed_mount_wq:
 	}
 failed_mount3:
 	del_timer(&sbi->s_err_report);
-	if (sbi->s_flex_groups) {
-		if (is_vmalloc_addr(sbi->s_flex_groups))
-			vfree(sbi->s_flex_groups);
-		else
-			kfree(sbi->s_flex_groups);
-	}
+	if (sbi->s_flex_groups)
+		ext4_kvfree(sbi->s_flex_groups);
 	percpu_counter_destroy(&sbi->s_freeblocks_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -3756,7 +3784,7 @@ failed_mount3:
 failed_mount2:
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
+	ext4_kvfree(sbi->s_group_desc);
 failed_mount:
 	if (sbi->s_proc) {
 		remove_proc_entry(sb->s_id, ext4_proc_root);
fs/ext4/truncate.h
@@ -0,0 +1,43 @@
+/*
+ * linux/fs/ext4/truncate.h
+ *
+ * Common inline functions needed for truncate support
+ */
+
+/*
+ * Truncate blocks that were not used by write. We have to truncate the
+ * pagecache as well so that corresponding buffers get properly unmapped.
+ */
+static inline void ext4_truncate_failed_write(struct inode *inode)
+{
+	truncate_inode_pages(inode->i_mapping, inode->i_size);
+	ext4_truncate(inode);
+}
+
+/*
+ * Work out how many blocks we need to proceed with the next chunk of a
+ * truncate transaction.
+ */
+static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
+{
+	ext4_lblk_t needed;
+
+	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
+
+	/* Give ourselves just enough room to cope with inodes in which
+	 * i_blocks is corrupt: we've seen disk corruptions in the past
+	 * which resulted in random data in an inode which looked enough
+	 * like a regular file for ext4 to try to delete it.  Things
+	 * will go a bit crazy if that happens, but at least we should
+	 * try not to panic the whole kernel. */
+	if (needed < 2)
+		needed = 2;
+
+	/* But we need to bound the transaction so we don't overflow the
+	 * journal. */
+	if (needed > EXT4_MAX_TRANS_DATA)
+		needed = EXT4_MAX_TRANS_DATA;
+
+	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
+}
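A worked example of the credit estimate above: i_blocks counts 512-byte sectors, so shifting by (blocksize_bits - 9) converts it to filesystem blocks, which is then clamped to [2, EXT4_MAX_TRANS_DATA] before the fixed per-transaction cost is added. Runnable with illustrative constants; both macros below are stand-ins for the real ext4 values.

#include <stdio.h>

#define MAX_TRANS_DATA		64	/* illustrative */
#define DATA_TRANS_BLOCKS	8	/* illustrative */

static unsigned long blocks_for_truncate(unsigned long long i_blocks,
					 unsigned int blocksize_bits)
{
	unsigned long needed = i_blocks >> (blocksize_bits - 9);

	if (needed < 2)
		needed = 2;			/* cope with corrupt i_blocks */
	if (needed > MAX_TRANS_DATA)
		needed = MAX_TRANS_DATA;	/* don't overflow the journal */
	return DATA_TRANS_BLOCKS + needed;
}

int main(void)
{
	/* 4 KiB blocks: 80 sectors -> 10 blocks -> 8 + 10 = 18 credits */
	printf("%lu\n", blocks_for_truncate(80, 12));
	/* huge file: clamped to 8 + 64 = 72 credits */
	printf("%lu\n", blocks_for_truncate(1ULL << 40, 12));
	return 0;
}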
fs/jbd2/checkpoint.c
@@ -257,9 +257,12 @@ static void
 __flush_batch(journal_t *journal, int *batch_count)
 {
 	int i;
+	struct blk_plug plug;
 
+	blk_start_plug(&plug);
 	for (i = 0; i < *batch_count; i++)
-		write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE);
+		write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
+	blk_finish_plug(&plug);
 
 	for (i = 0; i < *batch_count; i++) {
 		struct buffer_head *bh = journal->j_chkpt_bhs[i];
fs/jbd2/journal.c
@@ -2390,73 +2390,6 @@ static void __exit journal_exit(void)
 	jbd2_journal_destroy_caches();
 }
 
-/*
- * jbd2_dev_to_name is a utility function used by the jbd2 and ext4
- * tracing infrastructure to map a dev_t to a device name.
- *
- * The caller should use rcu_read_lock() in order to make sure the
- * device name stays valid until its done with it.  We use
- * rcu_read_lock() as well to make sure we're safe in case the caller
- * gets sloppy, and because rcu_read_lock() is cheap and can be safely
- * nested.
- */
-struct devname_cache {
-	struct rcu_head	rcu;
-	dev_t		device;
-	char		devname[BDEVNAME_SIZE];
-};
-#define CACHE_SIZE_BITS 6
-static struct devname_cache *devcache[1 << CACHE_SIZE_BITS];
-static DEFINE_SPINLOCK(devname_cache_lock);
-
-static void free_devcache(struct rcu_head *rcu)
-{
-	kfree(rcu);
-}
-
-const char *jbd2_dev_to_name(dev_t device)
-{
-	int	i = hash_32(device, CACHE_SIZE_BITS);
-	char	*ret;
-	struct block_device *bd;
-	static struct devname_cache *new_dev;
-
-	rcu_read_lock();
-	if (devcache[i] && devcache[i]->device == device) {
-		ret = devcache[i]->devname;
-		rcu_read_unlock();
-		return ret;
-	}
-	rcu_read_unlock();
-
-	new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
-	if (!new_dev)
-		return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
-	bd = bdget(device);
-	spin_lock(&devname_cache_lock);
-	if (devcache[i]) {
-		if (devcache[i]->device == device) {
-			kfree(new_dev);
-			bdput(bd);
-			ret = devcache[i]->devname;
-			spin_unlock(&devname_cache_lock);
-			return ret;
-		}
-		call_rcu(&devcache[i]->rcu, free_devcache);
-	}
-	devcache[i] = new_dev;
-	devcache[i]->device = device;
-	if (bd) {
-		bdevname(bd, devcache[i]->devname);
-		bdput(bd);
-	} else
-		__bdevname(device, devcache[i]->devname);
-	ret = devcache[i]->devname;
-	spin_unlock(&devname_cache_lock);
-	return ret;
-}
-EXPORT_SYMBOL(jbd2_dev_to_name);
-
 MODULE_LICENSE("GPL");
 module_init(journal_init);
 module_exit(journal_exit);
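With jbd2_dev_to_name() and its RCU-protected cache gone, the tracepoints emit the numeric major,minor pair, which userspace can decode with no kernel-side help. For example, pairing the two numbers with /sys/dev/block recovers the device:

#include <stdio.h>
#include <sys/sysmacros.h>	/* major(), minor(), makedev() */
#include <sys/types.h>

int main(void)
{
	dev_t dev = makedev(8, 1);	/* 8,1 is sda1 on typical systems */

	printf("dev %u,%u -> /sys/dev/block/%u:%u\n",
	       major(dev), minor(dev), major(dev), minor(dev));
	return 0;
}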
include/linux/jbd2.h
@@ -1329,12 +1329,6 @@ extern int jbd_blocks_per_page(struct inode *inode);
 #define BUFFER_TRACE2(bh, bh2, info)	do {} while (0)
 #define JBUFFER_TRACE(jh, info)	do {} while (0)
 
-/*
- * jbd2_dev_to_name is a utility function used by the jbd2 and ext4
- * tracing infrastructure to map a dev_t to a device name.
- */
-extern const char *jbd2_dev_to_name(dev_t device);
-
 #endif	/* __KERNEL__ */
 
 #endif	/* _LINUX_JBD2_H */
include/trace/events/ext4.h
@@ -23,7 +23,7 @@ TRACE_EVENT(ext4_free_inode,
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
+		__field(	__u16, mode			)
 		__field(	uid_t,	uid			)
 		__field(	gid_t,	gid			)
 		__field(	__u64, blocks			)

@@ -52,7 +52,7 @@ TRACE_EVENT(ext4_request_inode,
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	dir			)
-		__field(	umode_t, mode			)
+		__field(	__u16, mode			)
 	),
 
 	TP_fast_assign(

@@ -75,7 +75,7 @@ TRACE_EVENT(ext4_allocate_inode,
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
 		__field(	ino_t,	dir			)
-		__field(	umode_t, mode			)
+		__field(	__u16, mode			)
 	),
 
 	TP_fast_assign(

@@ -725,7 +725,7 @@ TRACE_EVENT(ext4_free_blocks,
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
+		__field(	__u16, mode			)
 		__field(	__u64,	block			)
 		__field(	unsigned long,	count		)
 		__field(	int,	flags			)

@@ -1012,7 +1012,7 @@ TRACE_EVENT(ext4_forget,
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
+		__field(	__u16, mode			)
 		__field(	int,	is_metadata		)
 		__field(	__u64,	block			)
 	),

@@ -1039,7 +1039,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
+		__field(	__u16, mode			)
 		__field(	__u64,	i_blocks		)
 		__field(	int,	used_blocks		)
 		__field(	int,	reserved_data_blocks	)

@@ -1076,7 +1076,7 @@ TRACE_EVENT(ext4_da_reserve_space,
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
+		__field(	__u16, mode			)
 		__field(	__u64,	i_blocks		)
 		__field(	int,	md_needed		)
 		__field(	int,	reserved_data_blocks	)

@@ -1110,7 +1110,7 @@ TRACE_EVENT(ext4_da_release_space,
 	TP_STRUCT__entry(
 		__field(	dev_t,	dev			)
 		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
+		__field(	__u16, mode			)
 		__field(	__u64,	i_blocks		)
 		__field(	int,	freed_blocks		)
 		__field(	int,	reserved_data_blocks	)
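umode_t is not guaranteed the same width everywhere in trees of this era, so recording it verbatim could change a trace record's binary layout between builds; __u16 pins the field size. The full file mode (type bits plus permission bits) genuinely fits in 16 bits, as a quick check shows:

#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>

/* S_IFMT is 0170000, so even the largest type value fits in 16 bits. */
int main(void)
{
	uint16_t mode = S_IFREG | 0644;

	printf("mode %#o, field is %zu bytes\n", (unsigned)mode, sizeof(mode));
	printf("regular file? %s\n", S_ISREG(mode) ? "yes" : "no");
	return 0;
}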
@@ -1518,6 +1518,77 @@ TRACE_EVENT(ext4_load_inode,
 		  (unsigned long) __entry->ino)
 );
 
+TRACE_EVENT(ext4_journal_start,
+	TP_PROTO(struct super_block *sb, int nblocks, unsigned long IP),
+
+	TP_ARGS(sb, nblocks, IP),
+
+	TP_STRUCT__entry(
+		__field(	dev_t,	dev			)
+		__field(	int,	nblocks			)
+		__field(unsigned long,	ip			)
+	),
+
+	TP_fast_assign(
+		__entry->dev	 = sb->s_dev;
+		__entry->nblocks = nblocks;
+		__entry->ip	 = IP;
+	),
+
+	TP_printk("dev %d,%d nblocks %d caller %pF",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->nblocks, (void *)__entry->ip)
+);
+
+DECLARE_EVENT_CLASS(ext4__trim,
+	TP_PROTO(struct super_block *sb,
+		 ext4_group_t group,
+		 ext4_grpblk_t start,
+		 ext4_grpblk_t len),
+
+	TP_ARGS(sb, group, start, len),
+
+	TP_STRUCT__entry(
+		__field(	int,	dev_major		)
+		__field(	int,	dev_minor		)
+		__field(	__u32,	group			)
+		__field(	int,	start			)
+		__field(	int,	len			)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major	= MAJOR(sb->s_dev);
+		__entry->dev_minor	= MINOR(sb->s_dev);
+		__entry->group		= group;
+		__entry->start		= start;
+		__entry->len		= len;
+	),
+
+	TP_printk("dev %d,%d group %u, start %d, len %d",
+		  __entry->dev_major, __entry->dev_minor,
+		  __entry->group, __entry->start, __entry->len)
+);
+
+DEFINE_EVENT(ext4__trim, ext4_trim_extent,
+
+	TP_PROTO(struct super_block *sb,
+		 ext4_group_t group,
+		 ext4_grpblk_t start,
+		 ext4_grpblk_t len),
+
+	TP_ARGS(sb, group, start, len)
+);
+
+DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
+
+	TP_PROTO(struct super_block *sb,
+		 ext4_group_t group,
+		 ext4_grpblk_t start,
+		 ext4_grpblk_t len),
+
+	TP_ARGS(sb, group, start, len)
+);
+
 #endif /* _TRACE_EXT4_H */
 
 /* This part must be outside protection */
include/trace/events/jbd2.h
@@ -26,8 +26,8 @@ TRACE_EVENT(jbd2_checkpoint,
 		__entry->result		= result;
 	),
 
-	TP_printk("dev %s result %d",
-		  jbd2_dev_to_name(__entry->dev), __entry->result)
+	TP_printk("dev %d,%d result %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->result)
 );
 
 DECLARE_EVENT_CLASS(jbd2_commit,

@@ -48,9 +48,9 @@ DECLARE_EVENT_CLASS(jbd2_commit,
 		__entry->transaction	= commit_transaction->t_tid;
 	),
 
-	TP_printk("dev %s transaction %d sync %d",
-		  jbd2_dev_to_name(__entry->dev), __entry->transaction,
-		  __entry->sync_commit)
+	TP_printk("dev %d,%d transaction %d sync %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->transaction, __entry->sync_commit)
 );
 
 DEFINE_EVENT(jbd2_commit, jbd2_start_commit,

@@ -100,9 +100,9 @@ TRACE_EVENT(jbd2_end_commit,
 		__entry->head		= journal->j_tail_sequence;
 	),
 
-	TP_printk("dev %s transaction %d sync %d head %d",
-		  jbd2_dev_to_name(__entry->dev), __entry->transaction,
-		  __entry->sync_commit, __entry->head)
+	TP_printk("dev %d,%d transaction %d sync %d head %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->transaction, __entry->sync_commit, __entry->head)
 );
 
 TRACE_EVENT(jbd2_submit_inode_data,

@@ -120,8 +120,9 @@ TRACE_EVENT(jbd2_submit_inode_data,
 		__entry->ino	= inode->i_ino;
 	),
 
-	TP_printk("dev %s ino %lu",
-		  jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
+	TP_printk("dev %d,%d ino %lu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (unsigned long) __entry->ino)
 );
 
 TRACE_EVENT(jbd2_run_stats,

@@ -156,9 +157,9 @@ TRACE_EVENT(jbd2_run_stats,
 		__entry->blocks_logged	= stats->rs_blocks_logged;
 	),
 
-	TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u "
+	TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u "
 		  "logging %u handle_count %u blocks %u blocks_logged %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->tid,
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
 		  jiffies_to_msecs(__entry->wait),
 		  jiffies_to_msecs(__entry->running),
 		  jiffies_to_msecs(__entry->locked),

@@ -192,9 +193,9 @@ TRACE_EVENT(jbd2_checkpoint_stats,
 		__entry->dropped	= stats->cs_dropped;
 	),
 
-	TP_printk("dev %s tid %lu chp_time %u forced_to_close %u "
+	TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u "
 		  "written %u dropped %u",
-		  jbd2_dev_to_name(__entry->dev), __entry->tid,
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid,
 		  jiffies_to_msecs(__entry->chp_time),
 		  __entry->forced_to_close, __entry->written, __entry->dropped)
 );

@@ -222,9 +223,10 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,
 		__entry->freed		= freed;
 	),
 
-	TP_printk("dev %s from %u to %u offset %lu freed %lu",
-		  jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
-		  __entry->first_tid, __entry->block_nr, __entry->freed)
+	TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->tail_sequence, __entry->first_tid,
+		  __entry->block_nr, __entry->freed)
 );
 
 #endif /* _TRACE_JBD2_H */