block: Remove partition support for zoned block devices

No known partitioning tool supports zoned block devices, especially the
host managed flavor with strong sequential write constraints.
Furthermore, there are also no known users or use cases for partitioned
zoned block devices.

This patch removes partition device creation for zoned block devices,
which allows simplifying the processing of zone commands for zoned
block devices. A warning is added if a partition table is found on the
device.

For report zones operations, no zone sector information remapping is
necessary anymore, simplifying the code. Of note is that remapping of
zone reports for DM targets is still necessary as done by
dm_remap_zone_report().

Similarly, remapping of a zone reset bio is not necessary anymore.
Testing for the applicability of the zone reset all request also becomes
simpler and only needs to check that the number of sectors of the
requested zone range is equal to the disk capacity.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Damien Le Moal 2019-11-11 11:39:25 +09:00 committed by Jens Axboe
parent ceeb373aa6
commit 5eac3eb30c
4 changed files with 21 additions and 124 deletions

View File

@ -851,11 +851,7 @@ static inline int blk_partition_remap(struct bio *bio)
if (unlikely(bio_check_ro(bio, p))) if (unlikely(bio_check_ro(bio, p)))
goto out; goto out;
/* if (bio_sectors(bio)) {
* Zone management bios do not have a sector count but they do have
* a start sector filled out and need to be remapped.
*/
if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio))) {
if (bio_check_eod(bio, part_nr_sects_read(p))) if (bio_check_eod(bio, part_nr_sects_read(p)))
goto out; goto out;
bio->bi_iter.bi_sector += p->start_sect; bio->bi_iter.bi_sector += p->start_sect;

View File

@ -93,32 +93,10 @@ unsigned int blkdev_nr_zones(struct block_device *bdev)
if (!blk_queue_is_zoned(q)) if (!blk_queue_is_zoned(q))
return 0; return 0;
return __blkdev_nr_zones(q, bdev->bd_part->nr_sects); return __blkdev_nr_zones(q, get_capacity(bdev->bd_disk));
} }
EXPORT_SYMBOL_GPL(blkdev_nr_zones); EXPORT_SYMBOL_GPL(blkdev_nr_zones);
/*
* Check that a zone report belongs to this partition, and if yes, fix its start
* sector and write pointer and return true. Return false otherwise.
*/
static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
{
sector_t offset = get_start_sect(bdev);
if (rep->start < offset)
return false;
rep->start -= offset;
if (rep->start + rep->len > bdev->bd_part->nr_sects)
return false;
if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
rep->wp = rep->start + rep->len;
else
rep->wp -= offset;
return true;
}
/** /**
* blkdev_report_zones - Get zones information * blkdev_report_zones - Get zones information
* @bdev: Target block device * @bdev: Target block device
@ -140,8 +118,7 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
{ {
struct request_queue *q = bdev_get_queue(bdev); struct request_queue *q = bdev_get_queue(bdev);
struct gendisk *disk = bdev->bd_disk; struct gendisk *disk = bdev->bd_disk;
unsigned int i, nrz; sector_t capacity = get_capacity(disk);
int ret;
if (!blk_queue_is_zoned(q)) if (!blk_queue_is_zoned(q))
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -154,27 +131,14 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
if (WARN_ON_ONCE(!disk->fops->report_zones)) if (WARN_ON_ONCE(!disk->fops->report_zones))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!*nr_zones || sector >= bdev->bd_part->nr_sects) { if (!*nr_zones || sector >= capacity) {
*nr_zones = 0; *nr_zones = 0;
return 0; return 0;
} }
nrz = min(*nr_zones, *nr_zones = min(*nr_zones, __blkdev_nr_zones(q, capacity - sector));
__blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
ret = disk->fops->report_zones(disk, get_start_sect(bdev) + sector,
zones, &nrz);
if (ret)
return ret;
for (i = 0; i < nrz; i++) { return disk->fops->report_zones(disk, sector, zones, nr_zones);
if (!blkdev_report_zone(bdev, zones))
break;
zones++;
}
*nr_zones = i;
return 0;
} }
EXPORT_SYMBOL_GPL(blkdev_report_zones); EXPORT_SYMBOL_GPL(blkdev_report_zones);
@ -185,15 +149,11 @@ static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
if (!blk_queue_zone_resetall(bdev_get_queue(bdev))) if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
return false; return false;
if (sector || nr_sectors != part_nr_sects_read(bdev->bd_part))
return false;
/* /*
* REQ_OP_ZONE_RESET_ALL can be executed only if the block device is * REQ_OP_ZONE_RESET_ALL can be executed only if the number of sectors
* the entire disk, that is, if the blocks device start offset is 0 and * of the applicable zone range is the entire disk.
* its capacity is the same as the entire disk.
*/ */
return get_start_sect(bdev) == 0 && return !sector && nr_sectors == get_capacity(bdev->bd_disk);
part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
} }
/** /**
@ -218,6 +178,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
{ {
struct request_queue *q = bdev_get_queue(bdev); struct request_queue *q = bdev_get_queue(bdev);
sector_t zone_sectors = blk_queue_zone_sectors(q); sector_t zone_sectors = blk_queue_zone_sectors(q);
sector_t capacity = get_capacity(bdev->bd_disk);
sector_t end_sector = sector + nr_sectors; sector_t end_sector = sector + nr_sectors;
struct bio *bio = NULL; struct bio *bio = NULL;
int ret; int ret;
@ -231,7 +192,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
if (!op_is_zone_mgmt(op)) if (!op_is_zone_mgmt(op))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (!nr_sectors || end_sector > bdev->bd_part->nr_sects) if (!nr_sectors || end_sector > capacity)
/* Out of range */ /* Out of range */
return -EINVAL; return -EINVAL;
@ -239,8 +200,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
if (sector & (zone_sectors - 1)) if (sector & (zone_sectors - 1))
return -EINVAL; return -EINVAL;
if ((nr_sectors & (zone_sectors - 1)) && if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
end_sector != bdev->bd_part->nr_sects)
return -EINVAL; return -EINVAL;
while (sector < end_sector) { while (sector < end_sector) {

View File

@ -459,56 +459,6 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
return 0; return 0;
} }
static bool part_zone_aligned(struct gendisk *disk,
struct block_device *bdev,
sector_t from, sector_t size)
{
unsigned int zone_sectors = bdev_zone_sectors(bdev);
/*
* If this function is called, then the disk is a zoned block device
* (host-aware or host-managed). This can be detected even if the
* zoned block device support is disabled (CONFIG_BLK_DEV_ZONED not
* set). In this case, however, only host-aware devices will be seen
* as a block device is not created for host-managed devices. Without
* zoned block device support, host-aware drives can still be used as
* regular block devices (no zone operation) and their zone size will
* be reported as 0. Allow this case.
*/
if (!zone_sectors)
return true;
/*
* Check partition start and size alignement. If the drive has a
* smaller last runt zone, ignore it and allow the partition to
* use it. Check the zone size too: it should be a power of 2 number
* of sectors.
*/
if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
u32 rem;
div_u64_rem(from, zone_sectors, &rem);
if (rem)
return false;
if ((from + size) < get_capacity(disk)) {
div_u64_rem(size, zone_sectors, &rem);
if (rem)
return false;
}
} else {
if (from & (zone_sectors - 1))
return false;
if ((from + size) < get_capacity(disk) &&
(size & (zone_sectors - 1)))
return false;
}
return true;
}
int rescan_partitions(struct gendisk *disk, struct block_device *bdev) int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
{ {
struct parsed_partitions *state = NULL; struct parsed_partitions *state = NULL;
@ -544,6 +494,14 @@ rescan:
} }
return -EIO; return -EIO;
} }
/* Partitions are not supported on zoned block devices */
if (bdev_is_zoned(bdev)) {
pr_warn("%s: ignoring partition table on zoned block device\n",
disk->disk_name);
goto out;
}
/* /*
* If any partition code tried to read beyond EOD, try * If any partition code tried to read beyond EOD, try
* unlocking native capacity even if partition table is * unlocking native capacity even if partition table is
@ -607,21 +565,6 @@ rescan:
} }
} }
/*
* On a zoned block device, partitions should be aligned on the
* device zone size (i.e. zone boundary crossing not allowed).
* Otherwise, resetting the write pointer of the last zone of
* one partition may impact the following partition.
*/
if (bdev_is_zoned(bdev) &&
!part_zone_aligned(disk, bdev, from, size)) {
printk(KERN_WARNING
"%s: p%d start %llu+%llu is not zone aligned\n",
disk->disk_name, p, (unsigned long long) from,
(unsigned long long) size);
continue;
}
part = add_partition(disk, p, from, size, part = add_partition(disk, p, from, size,
state->parts[p].flags, state->parts[p].flags,
&state->parts[p].info); &state->parts[p].info);
@ -635,6 +578,7 @@ rescan:
md_autodetect_dev(part_to_dev(part)->devt); md_autodetect_dev(part_to_dev(part)->devt);
#endif #endif
} }
out:
free_partitions(state); free_partitions(state);
return 0; return 0;
} }

View File

@ -1211,9 +1211,6 @@ EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
* The zone descriptors obtained with a zone report indicate * The zone descriptors obtained with a zone report indicate
* zone positions within the underlying device of the target. The zone * zone positions within the underlying device of the target. The zone
* descriptors must be remapped to match their position within the dm device. * descriptors must be remapped to match their position within the dm device.
* The caller target should obtain the zones information using
* blkdev_report_zones() to ensure that remapping for partition offset is
* already handled.
*/ */
void dm_remap_zone_report(struct dm_target *ti, sector_t start, void dm_remap_zone_report(struct dm_target *ti, sector_t start,
struct blk_zone *zones, unsigned int *nr_zones) struct blk_zone *zones, unsigned int *nr_zones)