md/raid0: fix up bio splitting.

raid0_make_request() should use a private bio_set rather than the
shared fs_bio_set, which is only meant for filesystems to use.
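
For context, a private pool of this kind is created once at setup time rather than per-request. A minimal sketch, assuming the 4.11-era two-argument bioset_create() interface; the function and variable names here are illustrative only (md itself keeps the pool in mddev->bio_set):

	#include <linux/bio.h>

	/* Sketch: a driver-private pool so bio_split() never draws on
	 * the shared fs_bio_set. */
	static struct bio_set *private_bio_set;	/* md uses mddev->bio_set */

	static int private_pool_init(void)	/* illustrative name */
	{
		private_bio_set = bioset_create(BIO_POOL_SIZE, 0);
		if (!private_bio_set)
			return -ENOMEM;
		return 0;
	}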

raid0_make_request() shouldn't loop around using the bio_set
multiple times as that can deadlock.

So use mddev->bio_set and pass the tail to generic_make_request()
instead of looping on it.
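
The deadlock in the looping version comes from drawing several bios from the same mempool within a single call: once the pool is empty, the allocation sleeps waiting for completions that this same thread is blocking. A simplified rendering of the non-looping shape the patch adopts (not the verbatim md code; max_chunk_sectors() stands in for raid0's chunk arithmetic and is not a real helper):

	static void make_request_once(struct mddev *mddev, struct bio *bio)
	{
		unsigned sectors = max_chunk_sectors(mddev, bio);	/* hypothetical */

		if (sectors < bio_sectors(bio)) {
			/* At most one allocation from the private pool per call. */
			struct bio *split = bio_split(bio, sectors, GFP_NOIO,
						      mddev->bio_set);
			bio_chain(split, bio);
			/*
			 * Hand the tail back to the block layer; it is queued on
			 * current->bio_list and re-enters this function only
			 * after the present call returns.
			 */
			generic_make_request(bio);
			bio = split;
		}
		/* ... map 'bio' to one component device and submit it once ... */
	}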

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
commit f00d7c85be
parent 868f604b1d
Author: NeilBrown
Date:   2017-04-05 14:05:51 +10:00
Committer: Shaohua Li

1 file changed, 36 insertions(+), 35 deletions(-)

drivers/md/raid0.c

@@ -462,52 +462,53 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct strip_zone *zone;
 	struct md_rdev *tmp_dev;
-	struct bio *split;
+	sector_t bio_sector;
+	sector_t sector;
+	unsigned chunk_sects;
+	unsigned sectors;
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
 		return;
 	}
 
-	do {
-		sector_t bio_sector = bio->bi_iter.bi_sector;
-		sector_t sector = bio_sector;
-		unsigned chunk_sects = mddev->chunk_sectors;
+	bio_sector = bio->bi_iter.bi_sector;
+	sector = bio_sector;
+	chunk_sects = mddev->chunk_sectors;
 
-		unsigned sectors = chunk_sects -
-			(likely(is_power_of_2(chunk_sects))
-			 ? (sector & (chunk_sects-1))
-			 : sector_div(sector, chunk_sects));
+	sectors = chunk_sects -
+		(likely(is_power_of_2(chunk_sects))
+		 ? (sector & (chunk_sects-1))
+		 : sector_div(sector, chunk_sects));
 
-		/* Restore due to sector_div */
-		sector = bio_sector;
+	/* Restore due to sector_div */
+	sector = bio_sector;
 
-		if (sectors < bio_sectors(bio)) {
-			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
-			bio_chain(split, bio);
-		} else {
-			split = bio;
-		}
+	if (sectors < bio_sectors(bio)) {
+		struct bio *split = bio_split(bio, sectors, GFP_NOIO, mddev->bio_set);
+		bio_chain(split, bio);
+		generic_make_request(bio);
+		bio = split;
+	}
 
-		zone = find_zone(mddev->private, &sector);
-		tmp_dev = map_sector(mddev, zone, sector, &sector);
-		split->bi_bdev = tmp_dev->bdev;
-		split->bi_iter.bi_sector = sector + zone->dev_start +
-			tmp_dev->data_offset;
+	zone = find_zone(mddev->private, &sector);
+	tmp_dev = map_sector(mddev, zone, sector, &sector);
+	bio->bi_bdev = tmp_dev->bdev;
+	bio->bi_iter.bi_sector = sector + zone->dev_start +
+		tmp_dev->data_offset;
 
-		if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
-			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
-			/* Just ignore it */
-			bio_endio(split);
-		} else {
-			if (mddev->gendisk)
-				trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
-						      split, disk_devt(mddev->gendisk),
-						      bio_sector);
-			mddev_check_writesame(mddev, split);
-			generic_make_request(split);
-		}
-	} while (split != bio);
+	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+		/* Just ignore it */
+		bio_endio(bio);
+	} else {
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+					      bio, disk_devt(mddev->gendisk),
+					      bio_sector);
+		mddev_check_writesame(mddev, bio);
+		generic_make_request(bio);
+	}
 }
 
 static void raid0_status(struct seq_file *seq, struct mddev *mddev)