Merge branch 'cleanups-for-4.1-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.1
commit 9deed229fa
@@ -1206,7 +1206,7 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
 	struct ulist *roots = NULL;
 	struct ulist_iterator uiter;
 	struct ulist_node *node;
-	struct seq_list elem = {};
+	struct seq_list elem = SEQ_LIST_INIT(elem);
 	int ret = 0;
 
 	tmp = ulist_alloc(GFP_NOFS);
@@ -1610,7 +1610,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 	struct ulist *roots = NULL;
 	struct ulist_node *ref_node = NULL;
 	struct ulist_node *root_node = NULL;
-	struct seq_list tree_mod_seq_elem = {};
+	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
 	struct ulist_iterator ref_uiter;
 	struct ulist_iterator root_uiter;
 
@@ -2990,8 +2990,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
 		       (unsigned long long)bio->bi_iter.bi_sector,
 		       dev_bytenr, bio->bi_bdev);
 
-		mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
-				       GFP_NOFS);
+		mapped_datav = kmalloc_array(bio->bi_vcnt,
+					     sizeof(*mapped_datav), GFP_NOFS);
 		if (!mapped_datav)
 			goto leave;
 		cur_bytenr = dev_bytenr;
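A note on the kmalloc_array()/kcalloc() conversions throughout this merge: an open-coded `n * size` multiplication can overflow and silently yield an undersized buffer, while the array helpers fail the allocation instead (kcalloc() additionally zeroes the memory). A minimal userspace sketch of the overflow check, for illustration only:

    #include <stdint.h>
    #include <stdlib.h>

    /* Model of the check done by kmalloc_array()/kcalloc(): refuse the
     * allocation outright if n * size would wrap around SIZE_MAX. */
    static void *alloc_array(size_t n, size_t size)
    {
        if (size != 0 && n > SIZE_MAX / size)
            return NULL; /* multiplication would overflow */
        return malloc(n * size);
    }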
@@ -622,7 +622,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	cb->orig_bio = bio;
 
 	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
-	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
+	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
 				       GFP_NOFS);
 	if (!cb->compressed_pages)
 		goto fail1;
@@ -578,7 +578,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
 	if (!tree_mod_need_log(fs_info, eb))
 		return 0;
 
-	tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
+	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
 	if (!tm_list)
 		return -ENOMEM;
 
@@ -677,7 +677,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 
 	if (log_removal && btrfs_header_level(old_root) > 0) {
 		nritems = btrfs_header_nritems(old_root);
-		tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
+		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
 				  flags);
 		if (!tm_list) {
 			ret = -ENOMEM;
@@ -814,7 +814,7 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
 		return 0;
 
-	tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
+	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
 			  GFP_NOFS);
 	if (!tm_list)
 		return -ENOMEM;
@@ -905,8 +905,7 @@ tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
 		return 0;
 
 	nritems = btrfs_header_nritems(eb);
-	tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
-			  GFP_NOFS);
+	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
 	if (!tm_list)
 		return -ENOMEM;
 
@@ -1329,6 +1329,8 @@ struct seq_list {
 	u64 seq;
 };
 
+#define SEQ_LIST_INIT(name)	{ .list = LIST_HEAD_INIT((name).list), .seq = 0 }
+
 enum btrfs_orphan_cleanup_state {
 	ORPHAN_CLEANUP_STARTED	= 1,
 	ORPHAN_CLEANUP_DONE	= 2,
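The SEQ_LIST_INIT() initializer introduced here is what the `struct seq_list elem = {};` declarations elsewhere in this merge are converted to. The practical difference: `{}` zero-fills the embedded list_head (prev/next are NULL), while LIST_HEAD_INIT makes the node point at itself, which is the empty-list invariant the list helpers expect. A self-contained sketch (the u64 typedef is added here just for illustration):

    typedef unsigned long long u64;

    struct list_head {
        struct list_head *next, *prev;
    };
    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    struct seq_list {
        struct list_head list;
        u64 seq;
    };
    #define SEQ_LIST_INIT(name) \
        { .list = LIST_HEAD_INIT((name).list), .seq = 0 }

    struct seq_list a = {};               /* a.list.next == a.list.prev == NULL  */
    struct seq_list b = SEQ_LIST_INIT(b); /* b.list points at itself: empty list */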
@@ -670,8 +670,8 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
 	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
 		srcdev = dev_replace->srcdev;
-		args->status.progress_1000 = div64_u64(dev_replace->cursor_left,
-			div64_u64(btrfs_device_get_total_bytes(srcdev), 1000));
+		args->status.progress_1000 = div_u64(dev_replace->cursor_left,
+			div_u64(btrfs_device_get_total_bytes(srcdev), 1000));
 		break;
 	}
 	btrfs_dev_replace_unlock(dev_replace);
@@ -806,7 +806,7 @@ static int btrfs_dev_replace_kthread(void *data)
 		btrfs_dev_replace_status(fs_info, status_args);
 		progress = status_args->status.progress_1000;
 		kfree(status_args);
-		do_div(progress, 10);
+		progress = div_u64(progress, 10);
 		printk_in_rcu(KERN_INFO
 			"BTRFS: continuing dev_replace from %s (devid %llu) to %s @%u%%\n",
 			dev_replace->srcdev->missing ? "<missing disk>" :
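The do_div() conversions that dominate the rest of this merge all follow one rule: do_div(n, base) is a macro that divides in place, leaving the quotient in n and evaluating to the remainder, while the math64.h helpers are ordinary functions returning the quotient. The correspondence, assuming a u64 n:

    rem = do_div(n, base);            /* old: n now holds the quotient       */
    n = div_u64(n, base);             /* new: u32 divisor, quotient returned */
    n = div64_u64(n, base);           /* new: u64 divisor                    */
    n = div_u64_rem(n, base, &rem);   /* new: quotient plus u32 remainder    */

Besides readability, the function forms avoid do_div()'s macro pitfalls: its first argument must be a u64 lvalue and is modified behind the caller's back, and whether the caller wants the quotient or the remainder is invisible at the call site.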
@@ -302,7 +302,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 		offset += cur_len;
 	}
 	if (csum_size > sizeof(inline_result)) {
-		result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
+		result = kzalloc(csum_size, GFP_NOFS);
 		if (!result)
 			return 1;
 	} else {
@@ -2276,7 +2276,7 @@ int open_ctree(struct super_block *sb,
 	fs_info->free_chunk_space = 0;
 	fs_info->tree_mod_log = RB_ROOT;
 	fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
-	fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
+	fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
 	/* readahead state */
 	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
 	spin_lock_init(&fs_info->reada_lock);
@@ -2561,8 +2561,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		 */
 		spin_lock(&delayed_refs->lock);
 		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
-		avg = div64_u64(avg, 4);
-		fs_info->avg_delayed_ref_runtime = avg;
+		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
 		spin_unlock(&delayed_refs->lock);
 	}
 	return 0;
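Divisions by a constant power of two are replaced with shifts rather than helper calls; for unsigned arithmetic the two are identical, and the shift avoids a 64-bit division routine on 32-bit targets. A trivial check of the equivalence:

    #include <assert.h>
    typedef unsigned long long u64;

    int main(void)
    {
        u64 x = 123456789ULL;
        assert((x >> 2) == x / 4);   /* the avg_delayed_ref_runtime case */
        assert((x >> 6) == x / 64);  /* the NSEC_PER_SEC / 64 case       */
        return 0;
    }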
@@ -2624,7 +2623,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
 	 * closer to what we're really going to want to ouse.
 	 */
-	return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
 
 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
@@ -3193,7 +3192,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 	struct inode *inode = NULL;
 	u64 alloc_hint = 0;
 	int dcs = BTRFS_DC_ERROR;
-	int num_pages = 0;
+	u64 num_pages = 0;
 	int retries = 0;
 	int ret = 0;
 
@@ -3293,7 +3292,7 @@ again:
 	 * taking up quite a bit since it's not folded into the other space
 	 * cache.
 	 */
-	num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
+	num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
 	if (!num_pages)
 		num_pages = 1;
 
@@ -4812,10 +4811,10 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
 
 	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
 		    csum_size * 2;
-	num_bytes += div64_u64(data_used + meta_used, 50);
+	num_bytes += div_u64(data_used + meta_used, 50);
 
 	if (num_bytes * 3 > meta_used)
-		num_bytes = div64_u64(meta_used, 3);
+		num_bytes = div_u64(meta_used, 3);
 
 	return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
 }
@@ -5075,16 +5074,16 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
 	    BTRFS_I(inode)->csum_bytes == 0)
 		return 0;
 
-	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+	old_csums = (int)div_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
 	if (reserve)
 		BTRFS_I(inode)->csum_bytes += num_bytes;
 	else
 		BTRFS_I(inode)->csum_bytes -= num_bytes;
 	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
-	num_csums_per_leaf = (int)div64_u64(csum_size,
+	num_csums_per_leaf = (int)div_u64(csum_size,
 					    sizeof(struct btrfs_csum_item) +
 					    sizeof(struct btrfs_disk_key));
-	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+	num_csums = (int)div_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
 	num_csums = num_csums + num_csums_per_leaf - 1;
 	num_csums = num_csums / num_csums_per_leaf;
 
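The final two lines above are the usual open-coded ceiling division, ceil(a / b) computed as (a + b - 1) / b; the kernel's DIV_ROUND_UP() macro wraps the same idiom:

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* equivalent to the two-step form in calc_csum_metadata_size(): */
    num_csums = DIV_ROUND_UP(num_csums, num_csums_per_leaf);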
@@ -8720,7 +8719,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		min_free <<= 1;
 	} else if (index == BTRFS_RAID_RAID0) {
 		dev_min = fs_devices->rw_devices;
-		do_div(min_free, dev_min);
+		min_free = div64_u64(min_free, dev_min);
 	}
 
 	/* We need to do this so that we can look at pending chunks */
@@ -185,8 +185,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
 	if (!dst) {
 		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
-			btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
-							    GFP_NOFS);
+			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
+					csum_size, GFP_NOFS);
 			if (!btrfs_bio->csum_allocated) {
 				btrfs_free_path(path);
 				return -ENOMEM;
@@ -273,11 +273,7 @@ void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
 		defrag = rb_entry(node, struct inode_defrag, rb_node);
 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 
-		if (need_resched()) {
-			spin_unlock(&fs_info->defrag_inodes_lock);
-			cond_resched();
-			spin_lock(&fs_info->defrag_inodes_lock);
-		}
+		cond_resched_lock(&fs_info->defrag_inodes_lock);
 
 		node = rb_first(&fs_info->defrag_inodes);
 	}
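cond_resched_lock() collapses the open-coded unlock/resched/relock sequence used here and at the free-space-cache.c and transaction.c sites below. A rough model of what the helper does for the caller (the real implementation behind linux/sched.h also takes lock contention into account on preemptible kernels):

    /* sketch, not the kernel's exact code */
    static int cond_resched_lock_model(spinlock_t *lock)
    {
        if (need_resched()) {
            spin_unlock(lock);
            cond_resched();
            spin_lock(lock);
            return 1; /* we rescheduled */
        }
        return 0;
    }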
@@ -1485,7 +1481,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
 	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
 	nrptrs = max(nrptrs, 8);
-	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
 
@@ -1635,8 +1631,8 @@ again:
 			btrfs_end_write_no_snapshoting(root);
 
 		if (only_release_metadata && copied > 0) {
-			u64 lockstart = round_down(pos, root->sectorsize);
-			u64 lockend = lockstart +
+			lockstart = round_down(pos, root->sectorsize);
+			lockend = lockstart +
 				(dirty_pages << PAGE_CACHE_SHIFT) - 1;
 
 			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
@@ -298,7 +298,7 @@ static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
 
 	memset(io_ctl, 0, sizeof(struct io_ctl));
 
-	io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
+	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
 	if (!io_ctl->pages)
 		return -ENOMEM;
 
@@ -1298,11 +1298,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
 				   u64 offset)
 {
 	u64 bitmap_start;
-	u64 bytes_per_bitmap;
+	u32 bytes_per_bitmap;
 
 	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
 	bitmap_start = offset - ctl->start;
-	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
+	bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
 	bitmap_start *= bytes_per_bitmap;
 	bitmap_start += ctl->start;
 
@@ -1521,10 +1521,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	u64 bitmap_bytes;
 	u64 extent_bytes;
 	u64 size = block_group->key.offset;
-	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
-	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+	u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+	u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
-	max_bitmaps = max(max_bitmaps, 1);
+	max_bitmaps = max_t(u32, max_bitmaps, 1);
 
 	ASSERT(ctl->total_bitmaps <= max_bitmaps);
 
@@ -1537,7 +1537,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 		max_bytes = MAX_CACHE_BYTES_PER_GIG;
 	else
 		max_bytes = MAX_CACHE_BYTES_PER_GIG *
-			    div64_u64(size, 1024 * 1024 * 1024);
+			    div_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
@@ -1552,14 +1552,14 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	}
 
 	/*
-	 * we want the extent entry threshold to always be at most 1/2 the maxw
+	 * we want the extent entry threshold to always be at most 1/2 the max
 	 * bytes we can have, or whatever is less than that.
 	 */
 	extent_bytes = max_bytes - bitmap_bytes;
-	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
+	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);
 
 	ctl->extents_thresh =
-		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
+		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
 }
 
 static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
@@ -1673,7 +1673,7 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
 		 */
 		if (*bytes >= align) {
 			tmp = entry->offset - ctl->start + align - 1;
-			do_div(tmp, align);
+			tmp = div64_u64(tmp, align);
 			tmp = tmp * align + ctl->start;
 			align_off = tmp - entry->offset;
 		} else {
@@ -2402,11 +2402,8 @@ static void __btrfs_remove_free_space_cache_locked(
 		} else {
 			free_bitmap(ctl, info);
 		}
-		if (need_resched()) {
-			spin_unlock(&ctl->tree_lock);
-			cond_resched();
-			spin_lock(&ctl->tree_lock);
-		}
+
+		cond_resched_lock(&ctl->tree_lock);
 	}
 }
 
@@ -2431,11 +2428,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 
 		WARN_ON(cluster->block_group != block_group);
 		__btrfs_return_cluster_to_free_space(block_group, cluster);
-		if (need_resched()) {
-			spin_unlock(&ctl->tree_lock);
-			cond_resched();
-			spin_lock(&ctl->tree_lock);
-		}
+
+		cond_resched_lock(&ctl->tree_lock);
 	}
 	__btrfs_remove_free_space_cache_locked(ctl);
 	spin_unlock(&ctl->tree_lock);
@@ -470,7 +470,7 @@ again:
 		 */
 		if (inode_need_compress(inode)) {
 			WARN_ON(pages);
-			pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
+			pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
 			if (!pages) {
 				/* just bail out to the uncompressed code */
 				goto cont;
@@ -1564,7 +1564,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 			goto out_free;
 		}
 
-		do_div(new_size, root->sectorsize);
+		new_size = div_u64(new_size, root->sectorsize);
 		new_size *= root->sectorsize;
 
 		printk_in_rcu(KERN_INFO "BTRFS: new size for %s is %llu\n",
@@ -3039,7 +3039,7 @@ out:
 static int check_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		     u64 disko)
 {
-	struct seq_list tree_mod_seq_elem = {};
+	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
 	struct ulist *roots;
 	struct ulist_iterator uiter;
 	struct ulist_node *root_node = NULL;
@@ -28,8 +28,7 @@ static inline u64 div_factor(u64 num, int factor)
 	if (factor == 10)
 		return num;
 	num *= factor;
-	do_div(num, 10);
-	return num;
+	return div_u64(num, 10);
 }
 
 static inline u64 div_factor_fine(u64 num, int factor)
@@ -37,8 +36,7 @@ static inline u64 div_factor_fine(u64 num, int factor)
 	if (factor == 100)
 		return num;
 	num *= factor;
-	do_div(num, 100);
-	return num;
+	return div_u64(num, 100);
 }
 
 #endif
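Both helpers keep their semantics across the conversion: div_factor(num, f) scales num by f tenths and div_factor_fine(num, f) by f hundredths, for example:

    div_factor(1000, 8);        /* 1000 * 8 / 10   == 800 */
    div_factor_fine(1000, 95);  /* 1000 * 95 / 100 == 950 */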
@@ -1845,7 +1845,7 @@ static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
 	struct ulist *roots = NULL;
 	struct ulist *qgroups, *tmp;
 	struct btrfs_qgroup *qgroup;
-	struct seq_list elem = {};
+	struct seq_list elem = SEQ_LIST_INIT(elem);
 	u64 seq;
 	int old_roots = 0;
 	int new_roots = 0;
@@ -1967,7 +1967,7 @@ static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
 	int err;
 	struct btrfs_qgroup *qg;
 	u64 root_obj = 0;
-	struct seq_list elem = {};
+	struct seq_list elem = SEQ_LIST_INIT(elem);
 
 	parents = ulist_alloc(GFP_NOFS);
 	if (!parents)
@@ -2522,7 +2522,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 {
 	struct btrfs_key found;
 	struct ulist *roots = NULL;
-	struct seq_list tree_mod_seq_elem = {};
+	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
 	u64 num_bytes;
 	u64 seq;
 	int new_roots;
@@ -1807,8 +1807,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 	int err;
 	int i;
 
-	pointers = kzalloc(rbio->real_stripes * sizeof(void *),
-			   GFP_NOFS);
+	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
 	if (!pointers) {
 		err = -ENOMEM;
 		goto cleanup_io;
@@ -964,9 +964,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	 * the statistics.
 	 */
 
-	sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
-				      sizeof(*sblocks_for_recheck),
-				      GFP_NOFS);
+	sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
+				      sizeof(*sblocks_for_recheck), GFP_NOFS);
 	if (!sblocks_for_recheck) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.malloc_errors++;
@@ -2319,7 +2318,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
 				       unsigned long *bitmap,
 				       u64 start, u64 len)
 {
-	int offset;
+	u32 offset;
 	int nsectors;
 	int sectorsize = sparity->sctx->dev_root->sectorsize;
 
@@ -2329,7 +2328,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
 	}
 
 	start -= sparity->logic_start;
-	offset = (int)do_div(start, sparity->stripe_len);
+	start = div_u64_rem(start, sparity->stripe_len, &offset);
 	offset /= sectorsize;
 	nsectors = (int)len / sectorsize;
 
@@ -2612,8 +2611,8 @@ static int get_raid56_logic_offset(u64 physical, int num,
 	int j = 0;
 	u64 stripe_nr;
 	u64 last_offset;
-	int stripe_index;
-	int rot;
+	u32 stripe_index;
+	u32 rot;
 
 	last_offset = (physical - map->stripes[num].physical) *
 		      nr_data_stripes(map);
@@ -2624,12 +2623,11 @@ static int get_raid56_logic_offset(u64 physical, int num,
 	for (i = 0; i < nr_data_stripes(map); i++) {
 		*offset = last_offset + i * map->stripe_len;
 
-		stripe_nr = *offset;
-		do_div(stripe_nr, map->stripe_len);
-		do_div(stripe_nr, nr_data_stripes(map));
+		stripe_nr = div_u64(*offset, map->stripe_len);
+		stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
 
 		/* Work out the disk rotation on this stripe-set */
-		rot = do_div(stripe_nr, map->num_stripes);
+		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
 		/* calculate which stripe this data locates */
 		rot += i;
 		stripe_index = rot % map->num_stripes;
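The accompanying int to u32 changes for rot, offset and stripe_index are forced by the helper's prototype: div_u64_rem() stores the remainder through a u32 pointer, so the receiving variables must be u32. On 64-bit builds the helper reduces to the obvious definition (as in include/linux/math64.h, 64-bit case shown):

    static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
    {
        *remainder = dividend % divisor;
        return dividend / divisor;
    }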
@@ -2995,10 +2993,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	int extent_mirror_num;
 	int stop_loop = 0;
 
-	nstripes = length;
 	physical = map->stripes[num].physical;
 	offset = 0;
-	do_div(nstripes, map->stripe_len);
+	nstripes = div_u64(length, map->stripe_len);
 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
 		offset = map->stripe_len * num;
 		increment = map->stripe_len * map->num_stripes;
@@ -1714,7 +1714,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 		avail_space = device->total_bytes - device->bytes_used;
 
 		/* align with stripe_len */
-		do_div(avail_space, BTRFS_STRIPE_LEN);
+		avail_space = div_u64(avail_space, BTRFS_STRIPE_LEN);
 		avail_space *= BTRFS_STRIPE_LEN;
 
 		/*
@@ -61,11 +61,23 @@ static struct btrfs_feature_attr btrfs_attr_##_name = {			     \
 	BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature)
 
 /* convert from attribute */
-#define to_btrfs_feature_attr(a) \
-			container_of(a, struct btrfs_feature_attr, kobj_attr)
-#define attr_to_btrfs_attr(a) container_of(a, struct kobj_attribute, attr)
-#define attr_to_btrfs_feature_attr(a) \
-		to_btrfs_feature_attr(attr_to_btrfs_attr(a))
+static inline struct btrfs_feature_attr *
+to_btrfs_feature_attr(struct kobj_attribute *a)
+{
+	return container_of(a, struct btrfs_feature_attr, kobj_attr);
+}
+
+static inline struct kobj_attribute *attr_to_btrfs_attr(struct attribute *attr)
+{
+	return container_of(attr, struct kobj_attribute, attr);
+}
+
+static inline struct btrfs_feature_attr *
+attr_to_btrfs_feature_attr(struct attribute *attr)
+{
+	return to_btrfs_feature_attr(attr_to_btrfs_attr(attr));
+}
+
 char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
 extern const char * const btrfs_feature_set_names[3];
 extern struct kobj_type space_info_ktype;
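Turning the container_of() wrapper macros into static inline functions costs nothing at runtime but gains type checking: each function declares exactly which attribute pointer type it accepts, so passing the wrong pointer now produces a compile-time diagnostic at the call site rather than whatever the macro expansion happens to do with it.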
@@ -93,11 +93,8 @@ static void clear_btree_io_tree(struct extent_io_tree *tree)
 		 */
 		ASSERT(!waitqueue_active(&state->wq));
 		free_extent_state(state);
-		if (need_resched()) {
-			spin_unlock(&tree->lock);
-			cond_resched();
-			spin_lock(&tree->lock);
-		}
+
+		cond_resched_lock(&tree->lock);
 	}
 	spin_unlock(&tree->lock);
 }
@@ -366,8 +366,8 @@ loop_lock:
 		btrfsic_submit_bio(cur->bi_rw, cur);
 		num_run++;
 		batch_run++;
-		if (need_resched())
-			cond_resched();
+
+		cond_resched();
 
 		/*
 		 * we made progress, there is more work to do and the bdi
@@ -400,8 +400,7 @@ loop_lock:
 			 * against it before looping
 			 */
 			last_waited = ioc->last_waited;
-			if (need_resched())
-				cond_resched();
+			cond_resched();
 			continue;
 		}
 		spin_lock(&device->io_lock);
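Unlike the spinlock sites converted to cond_resched_lock(), these two callers hold no lock, so the need_resched() guard is dropped entirely; cond_resched() performs the same check internally and is a no-op when no reschedule is due:

    /* the guard is redundant: */
    if (need_resched())
        cond_resched();

    /* cond_resched() already checks need_resched() itself: */
    cond_resched();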
@@ -3022,7 +3021,7 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
 
 		stripe_offset = btrfs_stripe_offset(leaf, stripe);
 		stripe_length = btrfs_chunk_length(leaf, chunk);
-		do_div(stripe_length, factor);
+		stripe_length = div_u64(stripe_length, factor);
 
 		if (stripe_offset < bargs->pend &&
 		    stripe_offset + stripe_length > bargs->pstart)
@@ -4289,7 +4288,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
 			     max_chunk_size);
 
-	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
+	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
 			       GFP_NOFS);
 	if (!devices_info)
 		return -ENOMEM;
@@ -4400,8 +4399,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	 */
 	if (stripe_size * data_stripes > max_chunk_size) {
 		u64 mask = (1ULL << 24) - 1;
-		stripe_size = max_chunk_size;
-		do_div(stripe_size, data_stripes);
+
+		stripe_size = div_u64(max_chunk_size, data_stripes);
 
 		/* bump the answer up to a 16MB boundary */
 		stripe_size = (stripe_size + mask) & ~mask;
@@ -4413,10 +4412,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		stripe_size = devices_info[ndevs-1].max_avail;
 	}
 
-	do_div(stripe_size, dev_stripes);
+	stripe_size = div_u64(stripe_size, dev_stripes);
 
 	/* align to BTRFS_STRIPE_LEN */
-	do_div(stripe_size, raid_stripe_len);
+	stripe_size = div_u64(stripe_size, raid_stripe_len);
 	stripe_size *= raid_stripe_len;
 
 	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -4954,7 +4953,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	u64 stripe_nr_orig;
 	u64 stripe_nr_end;
 	u64 stripe_len;
-	int stripe_index;
+	u32 stripe_index;
 	int i;
 	int ret = 0;
 	int num_stripes;
@@ -4995,7 +4994,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	 * stripe_nr counts the total number of stripes we have to stride
 	 * to get to this block
 	 */
-	do_div(stripe_nr, stripe_len);
+	stripe_nr = div64_u64(stripe_nr, stripe_len);
 
 	stripe_offset = stripe_nr * stripe_len;
 	BUG_ON(offset < stripe_offset);
@@ -5011,7 +5010,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		/* allow a write of a full stripe, but make sure we don't
 		 * allow straddling of stripes
 		 */
-		do_div(raid56_full_stripe_start, full_stripe_len);
+		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
+				full_stripe_len);
 		raid56_full_stripe_start *= full_stripe_len;
 	}
 
@@ -5136,7 +5136,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		stripe_index = 0;
 		stripe_nr_orig = stripe_nr;
 		stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
-		do_div(stripe_nr_end, map->stripe_len);
+		stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
 		stripe_end_offset = stripe_nr_end * map->stripe_len -
 				    (offset + *length);
 
@@ -5144,7 +5144,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		if (rw & REQ_DISCARD)
 			num_stripes = min_t(u64, map->num_stripes,
 					    stripe_nr_end - stripe_nr_orig);
-		stripe_index = do_div(stripe_nr, map->num_stripes);
+		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
+				&stripe_index);
 		if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
 			mirror_num = 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
@@ -5170,9 +5171,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		}
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
-		int factor = map->num_stripes / map->sub_stripes;
+		u32 factor = map->num_stripes / map->sub_stripes;
 
-		stripe_index = do_div(stripe_nr, factor);
+		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
 		stripe_index *= map->sub_stripes;
 
 		if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
@@ -5198,8 +5199,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
 		     mirror_num > 1)) {
 			/* push stripe_nr back to the start of the full stripe */
-			stripe_nr = raid56_full_stripe_start;
-			do_div(stripe_nr, stripe_len * nr_data_stripes(map));
+			stripe_nr = div_u64(raid56_full_stripe_start,
+					stripe_len * nr_data_stripes(map));
 
 			/* RAID[56] write or recovery. Return all stripes */
 			num_stripes = map->num_stripes;
@@ -5209,32 +5210,32 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			stripe_index = 0;
 			stripe_offset = 0;
 		} else {
-			u64 tmp;
-
 			/*
 			 * Mirror #0 or #1 means the original data block.
 			 * Mirror #2 is RAID5 parity block.
 			 * Mirror #3 is RAID6 Q block.
 			 */
-			stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+			stripe_nr = div_u64_rem(stripe_nr,
+					nr_data_stripes(map), &stripe_index);
 			if (mirror_num > 1)
 				stripe_index = nr_data_stripes(map) +
 						mirror_num - 2;
 
 			/* We distribute the parity blocks across stripes */
-			tmp = stripe_nr + stripe_index;
-			stripe_index = do_div(tmp, map->num_stripes);
+			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
+					&stripe_index);
 			if (!(rw & (REQ_WRITE | REQ_DISCARD |
 				    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
 				mirror_num = 1;
 		}
 	} else {
 		/*
-		 * after this do_div call, stripe_nr is the number of stripes
-		 * on this device we have to walk to find the data, and
-		 * stripe_index is the number of our device in the stripe array
+		 * after this, stripe_nr is the number of stripes on this
+		 * device we have to walk to find the data, and stripe_index is
+		 * the number of our device in the stripe array
 		 */
-		stripe_index = do_div(stripe_nr, map->num_stripes);
+		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
+				&stripe_index);
 		mirror_num = stripe_index + 1;
 	}
 	BUG_ON(stripe_index >= map->num_stripes);
@@ -5261,7 +5262,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
 	    mirror_num > 1)) {
 		u64 tmp;
-		int i, rot;
+		unsigned rot;
 
 		bbio->raid_map = (u64 *)((void *)bbio->stripes +
 				 sizeof(struct btrfs_bio_stripe) *
@@ -5269,8 +5270,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 				 sizeof(int) * tgtdev_indexes);
 
 		/* Work out the disk rotation on this stripe-set */
-		tmp = stripe_nr;
-		rot = do_div(tmp, num_stripes);
+		div_u64_rem(stripe_nr, num_stripes, &rot);
 
 		/* Fill in the logical address of each stripe */
 		tmp = stripe_nr * nr_data_stripes(map);
@@ -5285,8 +5285,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	}
 
 	if (rw & REQ_DISCARD) {
-		int factor = 0;
-		int sub_stripes = 0;
+		u32 factor = 0;
+		u32 sub_stripes = 0;
 		u64 stripes_per_dev = 0;
 		u32 remaining_stripes = 0;
 		u32 last_stripe = 0;
@@ -5437,9 +5437,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			}
 		}
 		if (found) {
-			u64 length = map->stripe_len;
-
-			if (physical_of_found + length <=
+			if (physical_of_found + map->stripe_len <=
 			    dev_replace->cursor_left) {
 				struct btrfs_bio_stripe *tgtdev_stripe =
 					bbio->stripes + num_stripes;
@@ -5535,15 +5533,15 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	rmap_len = map->stripe_len;
 
 	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
-		do_div(length, map->num_stripes / map->sub_stripes);
+		length = div_u64(length, map->num_stripes / map->sub_stripes);
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
-		do_div(length, map->num_stripes);
+		length = div_u64(length, map->num_stripes);
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-		do_div(length, nr_data_stripes(map));
+		length = div_u64(length, nr_data_stripes(map));
 		rmap_len = map->stripe_len * nr_data_stripes(map);
 	}
 
-	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
+	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
 	BUG_ON(!buf); /* -ENOMEM */
 
 	for (i = 0; i < map->num_stripes; i++) {
@@ -5554,11 +5552,11 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 			continue;
 
 		stripe_nr = physical - map->stripes[i].physical;
-		do_div(stripe_nr, map->stripe_len);
+		stripe_nr = div_u64(stripe_nr, map->stripe_len);
 
 		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
 			stripe_nr = stripe_nr * map->num_stripes + i;
-			do_div(stripe_nr, map->sub_stripes);
+			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
 		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
 			stripe_nr = stripe_nr * map->num_stripes + i;
 		} /* else if RAID[56], multiply by nr_data_stripes().