md: improve handling of bio with REQ_PREFLUSH in md_flush_request()
If pers->make_request fails in md_flush_request(), the bio is lost. To
fix this, pass back a bool to indicate whether the original make_request
call should continue to handle the I/O, instead of assuming the flush
logic will push it to completion.
Convert md_flush_request() to return a bool and no longer call the raid
driver's make_request function. If the return is true, the md flush
logic has completed the bio or will complete it, and the md make_request
call is done. If false, the md make_request function needs to keep
processing it like a normal bio. Let the original call to
md_handle_request handle any retries of sending the bio to the raid
driver's make_request function, should they be needed.
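
For illustration, the calling convention in each personality's
make_request then looks like the sketch below (abridged from the hunks
that follow; comments added here for explanation only):

	/* Return early only if the md flush logic has taken over the bio. */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	/*
	 * Otherwise any needed flush is already handled and REQ_PREFLUSH
	 * has been cleared; continue processing the I/O portion of the bio.
	 */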
Also mark md_flush_request and the make_request function pointer as
__must_check to issue warnings should these critical return values be
ignored.
Fixes: 2bc13b83e6 ("md: batch flush requests.")
Cc: stable@vger.kernel.org # v4.19+
Cc: NeilBrown <neilb@suse.com>
Signed-off-by: David Jeffery <djeffery@redhat.com>
Reviewed-by: Xiao Ni <xni@redhat.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
commit 775d78319f (parent fadcbd2901)
@@ -244,10 +244,9 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
 	sector_t start_sector, end_sector, data_offset;
 	sector_t bio_sector = bio->bi_iter.bi_sector;
 
-	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
-		md_flush_request(mddev, bio);
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+	    && md_flush_request(mddev, bio))
 		return true;
-	}
 
 	tmp_dev = which_dev(mddev, bio_sector);
 	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
@@ -104,10 +104,9 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;
 
-	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
-		md_flush_request(mddev, bio);
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+	    && md_flush_request(mddev, bio))
 		return true;
-	}
 
 	mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);
 
@@ -550,7 +550,13 @@ static void md_submit_flush_data(struct work_struct *ws)
 	}
 }
 
-void md_flush_request(struct mddev *mddev, struct bio *bio)
+/*
+ * Manages consolidation of flushes and submitting any flushes needed for
+ * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
+ * being finished in another context. Returns false if the flushing is
+ * complete but still needs the I/O portion of the bio to be processed.
+ */
+bool md_flush_request(struct mddev *mddev, struct bio *bio)
 {
 	ktime_t start = ktime_get_boottime();
 	spin_lock_irq(&mddev->lock);
@@ -575,9 +581,10 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 			bio_endio(bio);
 		else {
 			bio->bi_opf &= ~REQ_PREFLUSH;
-			mddev->pers->make_request(mddev, bio);
+			return false;
 		}
 	}
+	return true;
 }
 EXPORT_SYMBOL(md_flush_request);
 
@@ -550,7 +550,7 @@ struct md_personality
 	int level;
 	struct list_head list;
 	struct module *owner;
-	bool (*make_request)(struct mddev *mddev, struct bio *bio);
+	bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
 	/*
 	 * start up works that do NOT require md_thread. tasks that
 	 * requires md_thread should go into start()
@@ -703,7 +703,7 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
 extern void md_finish_reshape(struct mddev *mddev);
 
 extern int mddev_congested(struct mddev *mddev, int bits);
-extern void md_flush_request(struct mddev *mddev, struct bio *bio);
+extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
 extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 			   sector_t sector, int size, struct page *page);
 extern int md_super_wait(struct mddev *mddev);
@@ -575,10 +575,9 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 	unsigned chunk_sects;
 	unsigned sectors;
 
-	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
-		md_flush_request(mddev, bio);
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+	    && md_flush_request(mddev, bio))
 		return true;
-	}
 
 	if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
 		raid0_handle_discard(mddev, bio);
@@ -1567,10 +1567,9 @@ static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
 {
 	sector_t sectors;
 
-	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
-		md_flush_request(mddev, bio);
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+	    && md_flush_request(mddev, bio))
 		return true;
-	}
 
 	/*
 	 * There is a limit to the maximum size, but
@@ -1525,10 +1525,9 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
 	int chunk_sects = chunk_mask + 1;
 	int sectors = bio_sectors(bio);
 
-	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
-		md_flush_request(mddev, bio);
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
+	    && md_flush_request(mddev, bio))
 		return true;
-	}
 
 	if (!md_write_start(mddev, bio))
 		return false;
@@ -5592,8 +5592,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 		if (ret == 0)
 			return true;
 		if (ret == -ENODEV) {
-			md_flush_request(mddev, bi);
-			return true;
+			if (md_flush_request(mddev, bi))
+				return true;
 		}
 		/* ret == -EAGAIN, fallback */
 		/*