2019-05-01 02:42:43 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2016-10-18 14:40:33 +08:00
|
|
|
/*
|
|
|
|
* Zoned block device handling
|
|
|
|
*
|
|
|
|
* Copyright (c) 2015, Hannes Reinecke
|
|
|
|
* Copyright (c) 2015, SUSE Linux GmbH
|
|
|
|
*
|
|
|
|
* Copyright (c) 2016, Damien Le Moal
|
|
|
|
* Copyright (c) 2016, Western Digital
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/rbtree.h>
|
|
|
|
#include <linux/blkdev.h>
|
2018-10-12 18:08:50 +08:00
|
|
|
#include <linux/blk-mq.h>
|
2019-07-01 13:09:18 +08:00
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/vmalloc.h>
|
2019-07-01 13:09:16 +08:00
|
|
|
#include <linux/sched/mm.h>
|
2016-10-18 14:40:33 +08:00
|
|
|
|
2018-10-12 18:08:47 +08:00
|
|
|
#include "blk.h"
|
|
|
|
|
2020-03-26 01:49:54 +08:00
|
|
|
#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
|
|
|
|
static const char *const zone_cond_name[] = {
|
|
|
|
ZONE_COND_NAME(NOT_WP),
|
|
|
|
ZONE_COND_NAME(EMPTY),
|
|
|
|
ZONE_COND_NAME(IMP_OPEN),
|
|
|
|
ZONE_COND_NAME(EXP_OPEN),
|
|
|
|
ZONE_COND_NAME(CLOSED),
|
|
|
|
ZONE_COND_NAME(READONLY),
|
|
|
|
ZONE_COND_NAME(FULL),
|
|
|
|
ZONE_COND_NAME(OFFLINE),
|
|
|
|
};
|
|
|
|
#undef ZONE_COND_NAME
|
|
|
|
|
|
|
|
/**
|
|
|
|
* blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
|
|
|
|
* @zone_cond: BLK_ZONE_COND_XXX.
|
|
|
|
*
|
|
|
|
* Description: Centralize block layer function to convert BLK_ZONE_COND_XXX
|
|
|
|
* into string format. Useful in the debugging and tracing zone conditions. For
|
|
|
|
* invalid BLK_ZONE_COND_XXX it returns string "UNKNOWN".
|
|
|
|
*/
|
|
|
|
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
|
|
|
|
{
|
|
|
|
static const char *zone_cond_str = "UNKNOWN";
|
|
|
|
|
|
|
|
if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
|
|
|
|
zone_cond_str = zone_cond_name[zone_cond];
|
|
|
|
|
|
|
|
return zone_cond_str;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
|
|
|
|
|
2017-12-21 14:43:38 +08:00
|
|
|
/*
|
|
|
|
* Return true if a request is a write requests that needs zone write locking.
|
|
|
|
*/
|
|
|
|
bool blk_req_needs_zone_write_lock(struct request *rq)
|
|
|
|
{
|
|
|
|
if (!rq->q->seq_zones_wlock)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (blk_rq_is_passthrough(rq))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
switch (req_op(rq)) {
|
|
|
|
case REQ_OP_WRITE_ZEROES:
|
|
|
|
case REQ_OP_WRITE_SAME:
|
|
|
|
case REQ_OP_WRITE:
|
|
|
|
return blk_rq_zone_is_seq(rq);
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
|
|
|
|
|
2020-05-12 16:55:48 +08:00
|
|
|
bool blk_req_zone_write_trylock(struct request *rq)
|
|
|
|
{
|
|
|
|
unsigned int zno = blk_rq_zone_no(rq);
|
|
|
|
|
|
|
|
if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
|
|
|
|
rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);
|
|
|
|
|
2017-12-21 14:43:38 +08:00
|
|
|
void __blk_req_zone_write_lock(struct request *rq)
|
|
|
|
{
|
|
|
|
if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
|
|
|
|
rq->q->seq_zones_wlock)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
|
|
|
|
rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
|
|
|
|
|
|
|
|
void __blk_req_zone_write_unlock(struct request *rq)
|
|
|
|
{
|
|
|
|
rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
|
|
|
|
if (rq->q->seq_zones_wlock)
|
|
|
|
WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
|
|
|
|
rq->q->seq_zones_wlock));
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
|
|
|
|
|
2018-10-12 18:08:43 +08:00
|
|
|
/**
|
|
|
|
* blkdev_nr_zones - Get number of zones
|
2019-12-03 17:39:04 +08:00
|
|
|
* @disk: Target gendisk
|
2018-10-12 18:08:43 +08:00
|
|
|
*
|
2019-12-03 17:39:04 +08:00
|
|
|
* Return the total number of zones of a zoned block device. For a block
|
|
|
|
* device without zone capabilities, the number of zones is always 0.
|
2018-10-12 18:08:43 +08:00
|
|
|
*/
|
2019-12-03 17:39:04 +08:00
|
|
|
unsigned int blkdev_nr_zones(struct gendisk *disk)
|
2018-10-12 18:08:43 +08:00
|
|
|
{
|
2019-12-03 17:39:04 +08:00
|
|
|
sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);
|
2018-10-12 18:08:43 +08:00
|
|
|
|
2019-12-03 17:39:04 +08:00
|
|
|
if (!blk_queue_is_zoned(disk->queue))
|
2018-10-12 18:08:43 +08:00
|
|
|
return 0;
|
2019-12-03 17:39:04 +08:00
|
|
|
return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
|
2018-10-12 18:08:43 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
|
|
|
|
|
2016-10-18 14:40:33 +08:00
|
|
|
/**
|
|
|
|
* blkdev_report_zones - Get zones information
|
|
|
|
* @bdev: Target block device
|
|
|
|
* @sector: Sector from which to report zones
|
2019-11-11 10:39:30 +08:00
|
|
|
* @nr_zones: Maximum number of zones to report
|
|
|
|
* @cb: Callback function called for each reported zone
|
|
|
|
* @data: Private data for the callback
|
2016-10-18 14:40:33 +08:00
|
|
|
*
|
|
|
|
* Description:
|
2019-11-11 10:39:30 +08:00
|
|
|
* Get zone information starting from the zone containing @sector for at most
|
|
|
|
* @nr_zones, and call @cb for each zone reported by the device.
|
|
|
|
* To report all zones in a device starting from @sector, the BLK_ALL_ZONES
|
|
|
|
* constant can be passed to @nr_zones.
|
|
|
|
* Returns the number of zones reported by the device, or a negative errno
|
|
|
|
* value in case of failure.
|
|
|
|
*
|
|
|
|
* Note: The caller must use memalloc_noXX_save/restore() calls to control
|
|
|
|
* memory allocations done within this function.
|
2016-10-18 14:40:33 +08:00
|
|
|
*/
|
2018-10-12 18:08:49 +08:00
|
|
|
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
|
2019-11-11 10:39:30 +08:00
|
|
|
unsigned int nr_zones, report_zones_cb cb, void *data)
|
2016-10-18 14:40:33 +08:00
|
|
|
{
|
2019-11-11 10:39:24 +08:00
|
|
|
struct gendisk *disk = bdev->bd_disk;
|
2019-11-11 10:39:25 +08:00
|
|
|
sector_t capacity = get_capacity(disk);
|
2016-10-18 14:40:33 +08:00
|
|
|
|
2019-11-11 10:39:30 +08:00
|
|
|
if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
|
|
|
|
WARN_ON_ONCE(!disk->fops->report_zones))
|
2018-10-12 18:08:49 +08:00
|
|
|
return -EOPNOTSUPP;
|
2016-10-18 14:40:33 +08:00
|
|
|
|
2019-11-11 10:39:30 +08:00
|
|
|
if (!nr_zones || sector >= capacity)
|
2016-10-18 14:40:33 +08:00
|
|
|
return 0;
|
|
|
|
|
2019-11-11 10:39:30 +08:00
|
|
|
return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
|
2016-10-18 14:40:33 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(blkdev_report_zones);
|
|
|
|
|
2021-05-26 05:24:51 +08:00
|
|
|
static inline unsigned long *blk_alloc_zone_bitmap(int node,
|
|
|
|
unsigned int nr_zones)
|
2019-08-02 01:26:36 +08:00
|
|
|
{
|
2021-05-26 05:24:51 +08:00
|
|
|
return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
|
|
|
|
GFP_NOIO, node);
|
|
|
|
}
|
2019-08-02 01:26:36 +08:00
|
|
|
|
2021-05-26 05:24:51 +08:00
|
|
|
static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
|
|
|
|
void *data)
|
|
|
|
{
|
2019-08-02 01:26:36 +08:00
|
|
|
/*
|
2021-05-26 05:24:51 +08:00
|
|
|
* For an all-zones reset, ignore conventional, empty, read-only
|
|
|
|
* and offline zones.
|
2019-08-02 01:26:36 +08:00
|
|
|
*/
|
2021-05-26 05:24:51 +08:00
|
|
|
switch (zone->cond) {
|
|
|
|
case BLK_ZONE_COND_NOT_WP:
|
|
|
|
case BLK_ZONE_COND_EMPTY:
|
|
|
|
case BLK_ZONE_COND_READONLY:
|
|
|
|
case BLK_ZONE_COND_OFFLINE:
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
set_bit(idx, (unsigned long *)data);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Emulate REQ_OP_ZONE_RESET_ALL for devices that do not support it:
 * report all zones once to find those needing a reset, then issue one
 * chained REQ_OP_ZONE_RESET bio per such zone and wait for completion.
 * Returns 0 on success or a negative errno.
 */
static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
					  gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	/* One bit per zone; only the bits set by the callback get a reset. */
	need_reset = blk_alloc_zone_bitmap(q->node, q->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = bdev->bd_disk->fops->report_zones(bdev->bd_disk, 0,
				q->nr_zones, blk_zone_need_reset_cb,
				need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		/* Skip zones that the report marked as not needing a reset. */
		if (!test_bit(blk_queue_zone_no(q, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		/*
		 * blk_next_bio() chains a fresh bio onto the previous one,
		 * so waiting on the last bio below waits for the whole chain.
		 */
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_ZONE_RESET | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	/* bio stays NULL when no zone needed a reset; nothing to submit. */
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Reset all zones of a device supporting REQ_OP_ZONE_RESET_ALL natively:
 * a single data-less bio does the whole job. The bio lives on the stack
 * since we wait for it synchronously.
 * NOTE(review): @gfp_mask is unused here — kept for signature symmetry
 * with blkdev_zone_reset_all_emulated().
 */
static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio bio;

	bio_init(&bio, NULL, 0);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;

	return submit_bio_wait(&bio);
}
|
|
|
|
|
2016-10-18 14:40:33 +08:00
|
|
|
/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone and
 * must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 * Perform the specified operation on the range of zones specified by
 * @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 * is valid, but the specified range should not contain conventional zones.
 * The operation to execute on each zone can be a zone reset, open, close
 * or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	/* end_sector <= sector also catches nr_sectors == 0 and overflow. */
	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	/* A misaligned length is only allowed when reaching end of device. */
	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
		return blkdev_zone_reset_all(bdev, gfp_mask);
	}

	/* One chained bio per zone; waiting on the last waits for them all. */
	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);
		bio->bi_opf = op | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
|
2016-10-18 14:40:35 +08:00
|
|
|
|
2019-11-11 10:39:30 +08:00
|
|
|
/* Argument passed through report_zones() to the copy-to-user callback. */
struct zone_report_args {
	/* Destination user-space array of struct blk_zone. */
	struct blk_zone __user *zones;
};

/*
 * report_zones() callback for the BLKREPORTZONE ioctl: copy one reported
 * zone descriptor to the user buffer slot @idx. Returns -EFAULT if the
 * user copy fails, 0 otherwise.
 */
static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}
|
|
|
|
|
2018-03-09 07:28:50 +08:00
|
|
|
/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/* This ioctl only applies to zoned devices. */
	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	/* The user zone array directly follows the report header. */
	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	/* Report back the number of zones actually copied. */
	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
|
|
|
|
|
2021-03-11 15:25:46 +08:00
|
|
|
/*
 * Drop the page cache pages covering the sector range described by
 * @zrange, including dirty pages. Rejects empty, overflowing or
 * beyond-capacity ranges with -EINVAL.
 */
static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	sector_t end_sector = zrange->sector + zrange->nr_sectors;

	if (end_sector <= zrange->sector ||
	    end_sector > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	/* Truncate the byte range [start, end] covering the zones. */
	return truncate_bdev_range(bdev, mode, zrange->sector << SECTOR_SHIFT,
				   (end_sector << SECTOR_SHIFT) - 1);
}
|
|
|
|
|
2018-03-09 07:28:50 +08:00
|
|
|
/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/* Zone management only applies to zoned devices. */
	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	/* All of these operations modify the device. */
	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	/* Map the ioctl command to the corresponding zone operation. */
	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			return ret;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

	/*
	 * Invalidate the page cache again for zone reset: writes can only be
	 * direct for zoned devices so concurrent writes would not add any page
	 * to the page cache after/during reset. The page cache may be filled
	 * again due to concurrent reads though and dropping the pages for
	 * these is fine.
	 */
	if (!ret && cmd == BLKRESETZONE)
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);

	return ret;
}
|
2018-10-12 18:08:50 +08:00
|
|
|
|
|
|
|
void blk_queue_free_zone_bitmaps(struct request_queue *q)
|
|
|
|
{
|
2019-12-03 17:39:05 +08:00
|
|
|
kfree(q->conv_zones_bitmap);
|
|
|
|
q->conv_zones_bitmap = NULL;
|
2018-10-12 18:08:50 +08:00
|
|
|
kfree(q->seq_zones_wlock);
|
|
|
|
q->seq_zones_wlock = NULL;
|
|
|
|
}
|
|
|
|
|
2019-11-11 10:39:30 +08:00
|
|
|
/* State accumulated while revalidating a zoned disk's zones. */
struct blk_revalidate_zone_args {
	/* Disk being revalidated. */
	struct gendisk *disk;
	/* Bitmap of conventional zones, built during revalidation. */
	unsigned long *conv_zones_bitmap;
	/* Zone write lock bitmap, built during revalidation. */
	unsigned long *seq_zones_wlock;
	/* Number of zones seen so far. */
	unsigned int nr_zones;
	/* Size of the first zone; all others must match it (except the last). */
	sector_t zone_sectors;
	/* Expected start sector of the next reported zone. */
	sector_t sector;
};
|
|
|
|
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentailly large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
/*
|
|
|
|
* Helper function to check the validity of zones of a zoned block device.
|
|
|
|
*/
|
2019-11-11 10:39:30 +08:00
|
|
|
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
|
|
|
|
void *data)
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentailly large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
{
|
2019-11-11 10:39:30 +08:00
|
|
|
struct blk_revalidate_zone_args *args = data;
|
|
|
|
struct gendisk *disk = args->disk;
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentailly large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
struct request_queue *q = disk->queue;
|
|
|
|
sector_t capacity = get_capacity(disk);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* All zones must have the same size, with the exception on an eventual
|
|
|
|
* smaller last zone.
|
|
|
|
*/
|
2019-12-03 17:39:08 +08:00
|
|
|
if (zone->start == 0) {
|
|
|
|
if (zone->len == 0 || !is_power_of_2(zone->len)) {
|
|
|
|
pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
|
|
|
|
disk->disk_name, zone->len);
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentially large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
|
2019-12-03 17:39:08 +08:00
|
|
|
args->zone_sectors = zone->len;
|
|
|
|
args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
|
|
|
|
} else if (zone->start + args->zone_sectors < capacity) {
|
|
|
|
if (zone->len != args->zone_sectors) {
|
|
|
|
pr_warn("%s: Invalid zoned device with non constant zone size\n",
|
|
|
|
disk->disk_name);
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (zone->len > args->zone_sectors) {
|
|
|
|
pr_warn("%s: Invalid zoned device with larger last zone size\n",
|
|
|
|
disk->disk_name);
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentially large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for holes in the zone report */
|
2019-11-11 10:39:30 +08:00
|
|
|
if (zone->start != args->sector) {
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentially large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
pr_warn("%s: Zone gap at sectors %llu..%llu\n",
|
2019-11-11 10:39:30 +08:00
|
|
|
disk->disk_name, args->sector, zone->start);
|
|
|
|
return -ENODEV;
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentially large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Check zone type */
|
|
|
|
switch (zone->type) {
|
|
|
|
case BLK_ZONE_TYPE_CONVENTIONAL:
|
2019-12-03 17:39:06 +08:00
|
|
|
if (!args->conv_zones_bitmap) {
|
|
|
|
args->conv_zones_bitmap =
|
|
|
|
blk_alloc_zone_bitmap(q->node, args->nr_zones);
|
|
|
|
if (!args->conv_zones_bitmap)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
set_bit(idx, args->conv_zones_bitmap);
|
|
|
|
break;
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentially large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
case BLK_ZONE_TYPE_SEQWRITE_REQ:
|
|
|
|
case BLK_ZONE_TYPE_SEQWRITE_PREF:
|
2019-12-03 17:39:06 +08:00
|
|
|
if (!args->seq_zones_wlock) {
|
|
|
|
args->seq_zones_wlock =
|
|
|
|
blk_alloc_zone_bitmap(q->node, args->nr_zones);
|
|
|
|
if (!args->seq_zones_wlock)
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentially large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
|
|
|
|
disk->disk_name, (int)zone->type, zone->start);
|
2019-11-11 10:39:30 +08:00
|
|
|
return -ENODEV;
|
block: Enhance blk_revalidate_disk_zones()
For ZBC and ZAC zoned devices, the scsi driver revalidation processing
implemented by sd_revalidate_disk() includes a call to
sd_zbc_read_zones() which executes a full disk zone report used to
check that all zones of the disk are the same size. This processing is
followed by a call to blk_revalidate_disk_zones(), used to initialize
the device request queue zone bitmaps (zone type and zone write lock
bitmaps). To do so, blk_revalidate_disk_zones() also executes a full
device zone report to obtain zone types. As a result, the entire
zoned block device revalidation process includes two full device zone
report.
By moving the zone size checks into blk_revalidate_disk_zones(), this
process can be optimized to a single full device zone report, leading to
shorter device scan and revalidation times. This patch implements this
optimization, reducing the original full device zone report implemented
in sd_zbc_check_zones() to a single, small, report zones command
execution to obtain the size of the first zone of the device. Checks
whether all zones of the device are the same size as the first zone
size are moved to the generic blk_check_zone() function called from
blk_revalidate_disk_zones().
This optimization also has the following benefits:
1) fewer memory allocations in the scsi layer during disk revalidation
as the potentially large buffer for zone report execution is not
needed.
2) Implement zone checks in a generic manner, reducing the burden on
device driver which only need to obtain the zone size and check that
this size is a power of 2 number of LBAs. Any new type of zoned
block device will benefit from this.
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2019-11-11 10:39:22 +08:00
|
|
|
}
|
|
|
|
|
2019-11-11 10:39:30 +08:00
|
|
|
args->sector += zone->len;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-10-12 18:08:50 +08:00
|
|
|
/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re) allocate and initialize
 * a disk request queue zone bitmaps. This function should normally be called
 * within the disk ->revalidate method for blk-mq based drivers.  For BIO based
 * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	/* Zone revalidation only makes sense for zoned, blk-mq devices. */
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	/* A zero-capacity disk has no zones to report. */
	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	/* ->report_zones() returns the number of zones reported on success. */
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked: the callback advances args.sector past each zone,
	 * so it must end up exactly at the disk capacity.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		/* ret > 0: zones were reported and fully covered the disk. */
		blk_queue_chunk_sectors(q, args.zone_sectors);
		q->nr_zones = args.nr_zones;
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	/*
	 * On success the old queue bitmaps were swapped into args above; on
	 * failure args still holds the partially built new ones. Either way,
	 * free whatever args now owns (kfree(NULL) is a no-op).
	 */
	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
|
2021-01-28 12:47:32 +08:00
|
|
|
|
|
|
|
void blk_queue_clear_zone_settings(struct request_queue *q)
|
|
|
|
{
|
|
|
|
blk_mq_freeze_queue(q);
|
|
|
|
|
|
|
|
blk_queue_free_zone_bitmaps(q);
|
|
|
|
blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
|
|
|
|
q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
|
|
|
|
q->nr_zones = 0;
|
|
|
|
q->max_open_zones = 0;
|
|
|
|
q->max_active_zones = 0;
|
|
|
|
q->limits.chunk_sectors = 0;
|
|
|
|
q->limits.zone_write_granularity = 0;
|
|
|
|
q->limits.max_zone_append_sectors = 0;
|
|
|
|
|
|
|
|
blk_mq_unfreeze_queue(q);
|
|
|
|
}
|