2018-07-07 01:38:38 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef __BLK_NULL_BLK_H
|
|
|
|
#define __BLK_NULL_BLK_H
|
|
|
|
|
2019-09-16 22:07:59 +08:00
|
|
|
#undef pr_fmt
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2018-07-07 01:38:38 +08:00
|
|
|
#include <linux/blkdev.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/blk-mq.h>
|
|
|
|
#include <linux/hrtimer.h>
|
|
|
|
#include <linux/configfs.h>
|
|
|
|
#include <linux/badblocks.h>
|
|
|
|
#include <linux/fault-inject.h>
|
2020-11-20 09:55:14 +08:00
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/mutex.h>
|
2018-07-07 01:38:38 +08:00
|
|
|
|
|
|
|
/*
 * Per-command state for one I/O issued to a null_blk device.
 * Either @rq or @bio is used, depending on the configured queue mode.
 */
struct nullb_cmd {
	struct request *rq;		/* blk-mq request (request-based modes) */
	struct bio *bio;		/* bio (bio-based mode) */
	unsigned int tag;		/* tag identifying this command in its queue */
	blk_status_t error;		/* completion status reported to the block layer */
	struct nullb_queue *nq;		/* queue this command belongs to */
	struct hrtimer timer;		/* delayed completion (see completion_nsec) */
	bool fake_timeout;		/* true if completion is suppressed to emulate a timeout */
};
|
|
|
|
|
|
|
|
/*
 * Per-submission-queue state for a null_blk device.
 */
struct nullb_queue {
	unsigned long *tag_map;		/* bitmap tracking which tags are in use */
	wait_queue_head_t wait;		/* waitqueue for a free tag */
	unsigned int queue_depth;	/* number of tags/commands in this queue */
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* selector used when requeueing commands */

	struct nullb_cmd *cmds;		/* commands for this queue, indexed by tag */
};
|
|
|
|
|
2020-11-20 09:55:14 +08:00
|
|
|
/*
 * State of one zone of an emulated zoned device.
 */
struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;
		struct mutex mutex;
	};
	enum blk_zone_type type;	/* conventional or sequential-write zone */
	enum blk_zone_cond cond;	/* current zone condition (empty, open, full, ...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* current write pointer position */
	unsigned int len;		/* zone length (see zone_size_sects) */
	unsigned int capacity;		/* usable capacity, at most len */
};
|
|
|
|
|
2018-07-07 01:38:38 +08:00
|
|
|
/*
 * Configuration and state of one emulated device. Most fields are
 * exposed as configfs attributes via @item; @nullb is only valid while
 * the device is powered on.
 */
struct nullb_device {
	struct nullb *nullb;		/* runtime instance, NULL when not powered on */
	struct config_item item;	/* configfs representation of this device */
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage (compare against cache_size) */
	struct badblocks badblocks;	/* emulated bad block ranges */

	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* number of implicitly open zones */
	unsigned int nr_zones_exp_open;	/* number of explicitly open zones */
	unsigned int nr_zones_closed;	/* number of closed zones */
	unsigned int imp_close_zone_no;	/* rotor for picking an implicitly open zone to close — see zoned code */
	struct nullb_zone *zones;	/* array of nr_zones zone descriptors */
	sector_t zone_size_sects;	/* zone size in sectors */
	bool need_zone_res_mgmt;	/* enforce open/active zone limits (zone_max_open/zone_max_active) */
	spinlock_t zone_res_lock;	/* protects the zone resource counters above */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool virt_boundary; /* virtual boundary on/off for the device */
};
|
|
|
|
|
|
|
|
/*
 * Runtime instance of a powered-on null_blk device.
 */
struct nullb {
	struct nullb_device *dev;	/* configuration backing this instance */
	struct list_head list;		/* entry in the driver's device list */
	unsigned int index;		/* instance index */
	struct request_queue *q;	/* block layer request queue */
	struct gendisk *disk;		/* associated gendisk */
	struct blk_mq_tag_set *tag_set;	/* active tag set (may point at __tag_set) */
	struct blk_mq_tag_set __tag_set; /* embedded tag set used when no shared set is supplied */
	unsigned int queue_depth;	/* depth of each submission queue */
	atomic_long_t cur_bytes;	/* byte budget for bandwidth throttling (see mbps) */
	struct hrtimer bw_timer;	/* timer replenishing the bandwidth budget */
	unsigned long cache_flush_pos;	/* position of the next cache flush scan */
	spinlock_t lock;		/* protects device state — NOTE(review): exact coverage defined in the .c files */

	struct nullb_queue *queues;	/* array of nr_queues submission queues */
	unsigned int nr_queues;		/* number of entries in queues */
	char disk_name[DISK_NAME_LEN];	/* name shown to the block layer */
};
|
2018-07-07 01:38:39 +08:00
|
|
|
|
2020-11-20 09:55:17 +08:00
|
|
|
/* Discard/write-zeroes handling over a sector range of @dev. */
blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
				 sector_t nr_sectors);
/* Execute a regular (non-zone-specific) command on behalf of @cmd. */
blk_status_t null_process_cmd(struct nullb_cmd *cmd,
			      enum req_opf op, sector_t sector,
			      unsigned int nr_sectors);
|
|
|
|
|
2018-07-07 01:38:39 +08:00
|
|
|
#ifdef CONFIG_BLK_DEV_ZONED
/* Zoned-device support (implemented only when CONFIG_BLK_DEV_ZONED is set). */
/* Set up zone emulation for @dev and apply zone limits to @q. */
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
/* Register the device's zoned characteristics with the block layer. */
int null_register_zoned_dev(struct nullb *nullb);
/* Free the resources allocated by null_init_zoned_dev(). */
void null_free_zoned_dev(struct nullb_device *dev);
/* ->report_zones gendisk operation. */
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
/* Execute a zone command (zone append, reset, open, close, ...). */
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
				    enum req_opf op, sector_t sector,
				    sector_t nr_sectors);
/* Clamp a read length so it does not cross beyond valid (written) data. */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len);
#else
/*
 * Stubs used when zoned support is compiled out: creating a zoned
 * device fails and zone commands are rejected, while reads pass
 * through unclamped.
 */
static inline int null_init_zoned_dev(struct nullb_device *dev,
				      struct request_queue *q)
{
	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
	return -EINVAL;
}
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
			enum req_opf op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	/* No zones: the whole requested length is readable. */
	return len;
}
#define null_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */
|
2018-07-07 01:38:38 +08:00
|
|
|
#endif /* __BLK_NULL_BLK_H */
|