/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */
#ifndef BTRFS_RAID56_H
#define BTRFS_RAID56_H

#include <linux/workqueue.h>
#include "volumes.h"
/* The kind of full-stripe operation a btrfs_raid_bio is carrying out. */
enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};
|
|
|
|
struct btrfs_raid_bio {
|
|
|
|
struct btrfs_io_context *bioc;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* While we're doing RMW on a stripe we put it into a hash table so we
|
|
|
|
* can lock the stripe and merge more rbios into it.
|
|
|
|
*/
|
|
|
|
struct list_head hash_list;
|
|
|
|
|
|
|
|
/* LRU list for the stripe cache */
|
|
|
|
struct list_head stripe_cache;
|
|
|
|
|
|
|
|
/* For scheduling work in the helper threads */
|
|
|
|
struct work_struct work;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* bio_list and bio_list_lock are used to add more bios into the stripe
|
|
|
|
* in hopes of avoiding the full RMW
|
|
|
|
*/
|
|
|
|
struct bio_list bio_list;
|
|
|
|
spinlock_t bio_list_lock;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Also protected by the bio_list_lock, the plug list is used by the
|
|
|
|
* plugging code to collect partial bios while plugged. The stripe
|
|
|
|
* locking code also uses it to hand off the stripe lock to the next
|
|
|
|
* pending IO.
|
|
|
|
*/
|
|
|
|
struct list_head plug_list;
|
|
|
|
|
|
|
|
/* Flags that tell us if it is safe to merge with this bio. */
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set if we're doing a parity rebuild for a read from higher up, which
|
|
|
|
* is handled differently from a parity rebuild as part of RMW.
|
|
|
|
*/
|
|
|
|
enum btrfs_rbio_ops operation;
|
|
|
|
|
|
|
|
/* How many pages there are for the full stripe including P/Q */
|
|
|
|
u16 nr_pages;
|
|
|
|
|
|
|
|
/* How many sectors there are for the full stripe including P/Q */
|
|
|
|
u16 nr_sectors;
|
|
|
|
|
|
|
|
/* Number of data stripes (no p/q) */
|
|
|
|
u8 nr_data;
|
|
|
|
|
|
|
|
/* Numer of all stripes (including P/Q) */
|
|
|
|
u8 real_stripes;
|
|
|
|
|
|
|
|
/* How many pages there are for each stripe */
|
|
|
|
u8 stripe_npages;
|
|
|
|
|
|
|
|
/* How many sectors there are for each stripe */
|
|
|
|
u8 stripe_nsectors;
|
|
|
|
|
|
|
|
/* First bad stripe, -1 means no corruption */
|
|
|
|
s8 faila;
|
|
|
|
|
|
|
|
/* Second bad stripe (for RAID6 use) */
|
|
|
|
s8 failb;
|
|
|
|
|
|
|
|
/* Stripe number that we're scrubbing */
|
|
|
|
u8 scrubp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Size of all the bios in the bio_list. This helps us decide if the
|
|
|
|
* rbio maps to a full stripe or not.
|
|
|
|
*/
|
|
|
|
int bio_list_bytes;
|
|
|
|
|
|
|
|
refcount_t refs;
|
|
|
|
|
|
|
|
atomic_t stripes_pending;
|
|
|
|
|
|
|
|
atomic_t error;
|
|
|
|
|
2022-11-01 19:16:05 +08:00
|
|
|
wait_queue_head_t io_wait;
|
|
|
|
|
2022-06-01 17:46:59 +08:00
|
|
|
/* Bitmap to record which horizontal stripe has data */
|
|
|
|
unsigned long dbitmap;
|
|
|
|
|
|
|
|
/* Allocated with stripe_nsectors-many bits for finish_*() calls */
|
|
|
|
unsigned long finish_pbitmap;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* These are two arrays of pointers. We allocate the rbio big enough
|
|
|
|
* to hold them both and setup their locations when the rbio is
|
|
|
|
* allocated.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pointers to pages that we allocated for reading/writing stripes
|
|
|
|
* directly from the disk (including P/Q).
|
|
|
|
*/
|
|
|
|
struct page **stripe_pages;
|
|
|
|
|
|
|
|
/* Pointers to the sectors in the bio_list, for faster lookup */
|
|
|
|
struct sector_ptr *bio_sectors;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For subpage support, we need to map each sector to above
|
|
|
|
* stripe_pages.
|
|
|
|
*/
|
|
|
|
struct sector_ptr *stripe_sectors;
|
|
|
|
|
|
|
|
/* Allocated with real_stripes-many pointers for finish_*() calls */
|
|
|
|
void **finish_pointers;
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For trace event usage only. Records useful debug info for each bio submitted
|
|
|
|
* by RAID56 to each physical device.
|
|
|
|
*
|
|
|
|
* No matter signed or not, (-1) is always the one indicating we can not grab
|
|
|
|
* the proper stripe number.
|
|
|
|
*/
|
|
|
|
struct raid56_bio_trace_info {
|
|
|
|
u64 devid;
|
|
|
|
|
|
|
|
/* The offset inside the stripe. (<= STRIPE_LEN) */
|
|
|
|
u32 offset;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Stripe number.
|
|
|
|
* 0 is the first data stripe, and nr_data for P stripe,
|
|
|
|
* nr_data + 1 for Q stripe.
|
|
|
|
* >= real_stripes for
|
|
|
|
*/
|
|
|
|
u8 stripe_nr;
|
|
|
|
};
|
|
|
|
|
2019-05-17 17:43:43 +08:00
|
|
|
static inline int nr_data_stripes(const struct map_lookup *map)
|
2013-01-30 07:40:14 +08:00
|
|
|
{
|
2022-05-13 16:34:30 +08:00
|
|
|
return map->num_stripes - btrfs_nr_parity_stripes(map->type);
|
2013-01-30 07:40:14 +08:00
|
|
|
}
|
/*
 * Sentinel "physical" stripe numbers used to tag the P and Q stripes;
 * chosen as the two largest u64 values so they never collide with a real
 * data stripe index.
 */
#define RAID5_P_STRIPE ((u64)-2)
#define RAID6_Q_STRIPE ((u64)-1)

/* True iff @x is one of the parity sentinels above. */
#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) ||		\
			     ((x) == RAID6_Q_STRIPE))
Btrfs, raid56: support parity scrub on raid56
The implementation is:
- Read and check all the data with checksum in the same stripe.
All the data which has checksum is COW data, and we are sure
that it is not changed though we don't lock the stripe. because
the space of that data just can be reclaimed after the current
transction is committed, and then the fs can use it to store the
other data, but when doing scrub, we hold the current transaction,
that is that data can not be recovered, it is safe that read and check
it out of the stripe lock.
- Lock the stripe
- Read out all the data without checksum and parity
The data without checksum and the parity may be changed if we don't
lock the stripe, so we need read it in the stripe lock context.
- Check the parity
- Re-calculate the new parity and write back it if the old parity
is not right
- Unlock the stripe
If we can not read out the data or the data we read is corrupted,
we will try to repair it. If the repair fails. we will mark the
horizontal sub-stripe(pages on the same horizontal) as corrupted
sub-stripe, and we will skip the parity check and repair of that
horizontal sub-stripe.
And in order to skip the horizontal sub-stripe that has no data, we
introduce a bitmap. If there is some data on the horizontal sub-stripe,
we will the relative bit to 1, and when we check and repair the
parity, we will skip those horizontal sub-stripes that the relative
bits is 0.
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
2014-11-06 17:20:58 +08:00
|
|
|
struct btrfs_device;
|
|
|
|
|
2022-06-17 18:04:09 +08:00
|
|
|
void raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
|
2022-08-06 16:03:25 +08:00
|
|
|
int mirror_num);
|
2022-06-17 18:04:08 +08:00
|
|
|
void raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc);
|
2013-01-30 07:40:14 +08:00
|
|
|
|
2015-06-20 02:52:50 +08:00
|
|
|
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
|
2022-04-01 19:23:26 +08:00
|
|
|
unsigned int pgoff, u64 logical);
|
2015-06-20 02:52:50 +08:00
|
|
|
|
2021-09-23 14:00:09 +08:00
|
|
|
struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
|
2022-06-17 18:04:05 +08:00
|
|
|
struct btrfs_io_context *bioc,
|
2021-09-23 14:00:09 +08:00
|
|
|
struct btrfs_device *scrub_dev,
|
|
|
|
unsigned long *dbitmap, int stripe_nsectors);
|
Btrfs, raid56: support parity scrub on raid56
The implementation is:
- Read and check all the data with checksum in the same stripe.
All the data which has checksum is COW data, and we are sure
that it is not changed though we don't lock the stripe. because
the space of that data just can be reclaimed after the current
transction is committed, and then the fs can use it to store the
other data, but when doing scrub, we hold the current transaction,
that is that data can not be recovered, it is safe that read and check
it out of the stripe lock.
- Lock the stripe
- Read out all the data without checksum and parity
The data without checksum and the parity may be changed if we don't
lock the stripe, so we need read it in the stripe lock context.
- Check the parity
- Re-calculate the new parity and write back it if the old parity
is not right
- Unlock the stripe
If we can not read out the data or the data we read is corrupted,
we will try to repair it. If the repair fails. we will mark the
horizontal sub-stripe(pages on the same horizontal) as corrupted
sub-stripe, and we will skip the parity check and repair of that
horizontal sub-stripe.
And in order to skip the horizontal sub-stripe that has no data, we
introduce a bitmap. If there is some data on the horizontal sub-stripe,
we will the relative bit to 1, and when we check and repair the
parity, we will skip those horizontal sub-stripes that the relative
bits is 0.
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
2014-11-06 17:20:58 +08:00
|
|
|
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
|
|
|
|
|
2015-06-20 02:52:50 +08:00
|
|
|
struct btrfs_raid_bio *
|
2022-06-17 18:04:05 +08:00
|
|
|
raid56_alloc_missing_rbio(struct bio *bio, struct btrfs_io_context *bioc);
|
2015-06-20 02:52:50 +08:00
|
|
|
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
|
|
|
|
|
2013-01-30 07:40:14 +08:00
|
|
|
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
|
|
|
|
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
|
2018-04-04 01:16:55 +08:00
|
|
|
|
2013-01-30 07:40:14 +08:00
|
|
|
#endif
|