Btrfs: Introduce BTRFS_BLOCK_GROUP_RAID56_MASK to check raid56 simply
So we can check raid56 with:

    (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)

instead of the longer:

    (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))

Signed-off-by: Zhao Lei <zhaolei@cn.fujitsu.com>
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
parent 10f1190016
commit ffe2d2034b
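The change is purely mechanical: every open-coded (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) test is replaced by the new mask. Below is a minimal userspace sketch of the equivalence, not kernel code; the flag bit values mirror the btrfs uapi definitions, and the sample type values are arbitrary:

    /*
     * Minimal userspace sketch (not kernel code) illustrating the change:
     * the new mask makes the RAID5/6 membership check a single bit test
     * that is equivalent to the old long-hand expression.
     */
    #include <assert.h>
    #include <stdio.h>

    #define BTRFS_BLOCK_GROUP_RAID5         (1ULL << 7)
    #define BTRFS_BLOCK_GROUP_RAID6         (1ULL << 8)
    #define BTRFS_BLOCK_GROUP_RAID56_MASK   (BTRFS_BLOCK_GROUP_RAID5 | \
                                             BTRFS_BLOCK_GROUP_RAID6)

    int main(void)
    {
            /* a few representative map->type values */
            unsigned long long types[] = {
                    BTRFS_BLOCK_GROUP_RAID5,
                    BTRFS_BLOCK_GROUP_RAID6,
                    BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6,
                    0,              /* e.g. SINGLE */
                    1ULL << 6,      /* e.g. RAID10 */
            };

            for (unsigned i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
                    unsigned long long type = types[i];

                    /* old, long-hand check */
                    int old_way = !!(type & (BTRFS_BLOCK_GROUP_RAID5 |
                                             BTRFS_BLOCK_GROUP_RAID6));
                    /* new check introduced by this commit */
                    int new_way = !!(type & BTRFS_BLOCK_GROUP_RAID56_MASK);

                    assert(old_way == new_way);
                    printf("type=0x%llx raid56=%d\n", type, new_way);
            }
            return 0;
    }

The mask is defined once next to BTRFS_BLOCK_GROUP_PROFILE_MASK (in fs/btrfs/ctree.h at the time of this commit), so RAID56-related call sites only need the single symbol.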
fs/btrfs/ctree.h
@@ -1020,6 +1020,9 @@ enum btrfs_raid_types {
 					 BTRFS_BLOCK_GROUP_RAID6 |   \
 					 BTRFS_BLOCK_GROUP_DUP |     \
 					 BTRFS_BLOCK_GROUP_RAID10)
+#define BTRFS_BLOCK_GROUP_RAID56_MASK	(BTRFS_BLOCK_GROUP_RAID5 |   \
+					 BTRFS_BLOCK_GROUP_RAID6)
+
 /*
  * We need a bit for restriper to be able to tell when chunks of type
  * SINGLE are available.  This "extended" profile format is used in
fs/btrfs/inode.c
@@ -7812,8 +7812,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
 	}

 	/* async crcs make it difficult to collect full stripe writes. */
-	if (btrfs_get_alloc_profile(root, 1) &
-	    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
+	if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
 		async_submit = 0;
 	else
 		async_submit = 1;
fs/btrfs/scrub.c
@@ -1273,7 +1273,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
 {
 	int i;

-	if (map_type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+	if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		/* RAID5/6 */
 		for (i = 0; i < nstripes; i++) {
 			if (raid_map[i] == RAID6_Q_STRIPE ||
@@ -1420,8 +1420,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error)
 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
 {
 	return page->recover &&
-	       (page->recover->bbio->map_type & (BTRFS_BLOCK_GROUP_RAID5 |
-						 BTRFS_BLOCK_GROUP_RAID6));
+	       (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
 }

 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
@@ -2994,8 +2993,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
 		increment = map->stripe_len;
 		mirror_num = num % map->num_stripes + 1;
-	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-				BTRFS_BLOCK_GROUP_RAID6)) {
+	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		get_raid56_logic_offset(physical, num, map, &offset, NULL);
 		increment = map->stripe_len * nr_data_stripes(map);
 		mirror_num = 1;
@@ -3029,8 +3027,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	 */
 	logical = base + offset;
 	physical_end = physical + nstripes * map->stripe_len;
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			 BTRFS_BLOCK_GROUP_RAID6)) {
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		get_raid56_logic_offset(physical_end, num,
 					map, &logic_end, NULL);
 		logic_end += base;
@@ -3076,8 +3073,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 	ret = 0;
 	while (physical < physical_end) {
 		/* for raid56, we skip parity stripe */
-		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-				 BTRFS_BLOCK_GROUP_RAID6)) {
+		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 			ret = get_raid56_logic_offset(physical, num,
 					map, &logical, &stripe_logical);
 			logical += base;
@@ -3235,8 +3231,7 @@ again:
 			scrub_free_csums(sctx);
 			if (extent_logical + extent_len <
 			    key.objectid + bytes) {
-				if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-					BTRFS_BLOCK_GROUP_RAID6)) {
+				if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
					/*
					 * loop until we find next data stripe
					 * or we have finished all stripes.
fs/btrfs/volumes.c
@@ -4196,7 +4196,7 @@ static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)

 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
 {
-	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
+	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
 		return;

 	btrfs_set_fs_incompat(info, RAID56);
@@ -4803,10 +4803,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root,

 	BUG_ON(em->start > logical || em->start + em->len < logical);
 	map = (struct map_lookup *)em->bdev;
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			 BTRFS_BLOCK_GROUP_RAID6)) {
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
 		len = map->stripe_len * nr_data_stripes(map);
-	}
 	free_extent_map(em);
 	return len;
 }
@@ -4826,8 +4824,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,

 	BUG_ON(em->start > logical || em->start + em->len < logical);
 	map = (struct map_lookup *)em->bdev;
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			 BTRFS_BLOCK_GROUP_RAID6))
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
 		ret = 1;
 	free_extent_map(em);
 	return ret;
@@ -4998,7 +4995,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	stripe_offset = offset - stripe_offset;

 	/* if we're here for raid56, we need to know the stripe aligned start */
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
 		raid56_full_stripe_start = offset;

@@ -5011,8 +5008,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,

 	if (rw & REQ_DISCARD) {
 		/* we don't discard raid56 yet */
-		if (map->type &
-		    (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 			ret = -EOPNOTSUPP;
 			goto out;
 		}
@@ -5022,7 +5018,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 	/* For writes to RAID[56], allow a full stripeset across all disks.
 	   For other RAID types and for RAID[56] reads, just allow a single
 	   stripe (on a single disk). */
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+	if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
 	    (rw & REQ_WRITE)) {
 		max_len = stripe_len * nr_data_stripes(map) -
 			(offset - raid56_full_stripe_start);
@@ -5188,8 +5184,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 			mirror_num = stripe_index - old_stripe_index + 1;
 		}

-	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-				BTRFS_BLOCK_GROUP_RAID6)) {
+	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		if (need_raid_map &&
 		    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
 		     mirror_num > 1)) {
@@ -5253,7 +5248,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

 	/* build raid_map */
-	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
 	    need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
 	    mirror_num > 1)) {
 		u64 tmp;
@@ -5534,8 +5529,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 		do_div(length, map->num_stripes / map->sub_stripes);
 	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
 		do_div(length, map->num_stripes);
-	else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-			      BTRFS_BLOCK_GROUP_RAID6)) {
+	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		do_div(length, nr_data_stripes(map));
 		rmap_len = map->stripe_len * nr_data_stripes(map);
 	}