for-linus-20180504
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJa7HerAAoJEPfTWPspceCmxdsP/3ncJdw/PJRGaQNt99ogEIbl
y/YscWxPWsxbigM0Yc0zh134vO5ZeE7v12InpoE3i5OO4UW+oC+WYP/KDAo3TJIy
j/9r25p1kfb3j/8fNlb8uMf/6/nKk29cu+gqIZleHMOj6hfap5AFdTwW0/B/gC/p
BJ+C3e3s41intl+NikZmD4M959gpPTgm5ma8wyCz1XKtGQMH5AxFFrIc22vug/Fb
3Nk++xuFvgF04tCXwimhgny2eOtHt5L6KNuYYHFWBnd1gXALttsisLgAW2vXbfFB
c9PDEya3c+btr8+ied27Tp0hHlcQa2/ZY+yFJ3RJ35AXMvTVNDx6bKF3PzfJWzt+
ynjrywsXC/k7G1JBZntdXF7+y8b52keaIBS8DBBxzhhmzrv0NOTGTaQRhuK5eeem
tHrvEZlP5iqPRGGQz7F1RYztdWulo/iMLJwibuy2rcNYeHL5T0Olhv9hdH26OVqV
CNEuEvy+xO4uzkXAGm3j/EoHryHvGgp2xD/8OuQfTnjB6IdcuLznJuyBiUyOj/te
PgSAI/SdUKPnWyVVONKjXyOyvAglcenNtWMmAZQbsOSNZAW2blrXSFvzHa8wDVe+
Zpw5+fWJOioemMo+gf884jMRbNDfwyq5hcgjpbkYRz+qg60abqefNt7e87mTqTcJ
WqP9luNiP9RmXsXo4k+w
=P6V8
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20180504' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes that should go into this release. This
  contains:

   - Set of bcache fixes from Coly, fixing regressions in patches that
     went into this series.

   - Set of NVMe fixes by way of Keith.

   - Set of bdi related fixes, one from Jan and two from Tetsuo Handa,
     fixing various issues around device addition/removal.

   - Two block inflight fixes from Omar, fixing issues around the
     transition to using tags for blk-mq inflight accounting that we
     did a few releases ago"

* tag 'for-linus-20180504' of git://git.kernel.dk/linux-block:
  bdi: Fix oops in wb_workfn()
  nvmet: switch loopback target state to connecting when resetting
  nvme/multipath: Fix multipath disabled naming collisions
  nvme/multipath: Disable runtime writable enabling parameter
  nvme: Set integrity flag for user passthrough commands
  nvme: fix potential memory leak in option parsing
  bdi: Fix use after free bug in debugfs_remove()
  bdi: wake up concurrent wb_shutdown() callers.
  bcache: use pr_info() to inform duplicated CACHE_SET_IO_DISABLE set
  bcache: set dc->io_disable to true in conditional_stop_bcache_device()
  bcache: add wait_for_kthread_stop() in bch_allocator_thread()
  bcache: count backing device I/O error for writeback I/O
  bcache: set CACHE_SET_IO_DISABLE in bch_cached_dev_error()
  bcache: store disk name in struct cache and struct cached_dev
  blk-mq: fix sysfs inflight counter
  blk-mq: count allocated but not started requests in iostats inflight
commit 2f50037a1c
@@ -95,18 +95,15 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
 {
        struct mq_inflight *mi = priv;

-       if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
-               /*
-                * index[0] counts the specific partition that was asked
-                * for. index[1] counts the ones that are active on the
-                * whole device, so increment that if mi->part is indeed
-                * a partition, and not a whole device.
-                */
-               if (rq->part == mi->part)
-                       mi->inflight[0]++;
-               if (mi->part->partno)
-                       mi->inflight[1]++;
-       }
+       /*
+        * index[0] counts the specific partition that was asked for. index[1]
+        * counts the ones that are active on the whole device, so increment
+        * that if mi->part is indeed a partition, and not a whole device.
+        */
+       if (rq->part == mi->part)
+               mi->inflight[0]++;
+       if (mi->part->partno)
+               mi->inflight[1]++;
 }

 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -118,6 +115,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
 }

+static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
+                                    struct request *rq, void *priv,
+                                    bool reserved)
+{
+       struct mq_inflight *mi = priv;
+
+       if (rq->part == mi->part)
+               mi->inflight[rq_data_dir(rq)]++;
+}
+
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                        unsigned int inflight[2])
+{
+       struct mq_inflight mi = { .part = part, .inflight = inflight, };
+
+       inflight[0] = inflight[1] = 0;
+       blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
+}
+
 void blk_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
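blk_mq_check_inflight() now counts every allocated request for the queried partition (index 0) and for the whole device (index 1), per the "count allocated but not started requests" change in the shortlog, while the new blk_mq_check_inflight_rw() splits the count by data direction for the sysfs inflight file. A compilable userspace sketch of the same bookkeeping; the struct and field names below are simplified stand-ins, not the kernel types:

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's request/partition objects. */
    struct part { int partno; };
    struct request { const struct part *part; int dir; }; /* dir: 0 = read, 1 = write */

    /* Mirrors blk_mq_check_inflight(): [0] = queried partition, [1] = whole device. */
    static void count_inflight(const struct request *rq, const struct part *queried,
                               unsigned int inflight[2])
    {
        if (rq->part == queried)
            inflight[0]++;
        if (rq->part->partno)   /* partno != 0 means "a partition, not the whole disk" */
            inflight[1]++;
    }

    /* Mirrors blk_mq_check_inflight_rw(): split by data direction instead. */
    static void count_inflight_rw(const struct request *rq, const struct part *queried,
                                  unsigned int inflight[2])
    {
        if (rq->part == queried)
            inflight[rq->dir]++;
    }

    int main(void)
    {
        struct part disk = { 0 }, p1 = { 1 };
        struct request rqs[] = { { &p1, 0 }, { &p1, 1 }, { &disk, 1 } };
        unsigned int a[2] = { 0, 0 }, b[2] = { 0, 0 };

        for (unsigned i = 0; i < sizeof(rqs) / sizeof(rqs[0]); i++) {
            count_inflight(&rqs[i], &p1, a);
            count_inflight_rw(&rqs[i], &p1, b);
        }
        printf("partition/device: %u %u, read/write: %u %u\n", a[0], a[1], b[0], b[1]);
        return 0;
    }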
@@ -188,7 +188,9 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
 }

 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
-                     unsigned int inflight[2]);
+                     unsigned int inflight[2]);
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                        unsigned int inflight[2]);

 static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
 {
@@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
        }
 }

+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                      unsigned int inflight[2])
+{
+       if (q->mq_ops) {
+               blk_mq_in_flight_rw(q, part, inflight);
+               return;
+       }
+
+       inflight[0] = atomic_read(&part->in_flight[0]);
+       inflight[1] = atomic_read(&part->in_flight[1]);
+}
+
 struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
 {
        struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
@@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev,
                jiffies_to_msecs(part_stat_read(p, time_in_queue)));
 }

-ssize_t part_inflight_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
 {
        struct hd_struct *p = dev_to_part(dev);
+       struct request_queue *q = part_to_disk(p)->queue;
+       unsigned int inflight[2];

-       return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
-               atomic_read(&p->in_flight[1]));
+       part_in_flight_rw(q, p, inflight);
+       return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
 }

 #ifdef CONFIG_FAIL_MAKE_REQUEST
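part_inflight_show() backs /sys/block/<dev>/inflight (and the per-partition file of the same name), which prints two unsigned counters: requests in flight for reads and for writes. A minimal userspace reader; the sda path is only an example, adjust it for your system:

    #include <stdio.h>

    int main(void)
    {
        /* Example path; any block device or partition directory works. */
        const char *path = "/sys/block/sda/inflight";
        unsigned int reads, writes;
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%u %u", &reads, &writes) != 2) {
            fprintf(stderr, "unexpected format in %s\n", path);
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("in flight: %u reads, %u writes\n", reads, writes);
        return 0;
    }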
@@ -290,7 +290,7 @@ do { \
                if (kthread_should_stop() ||                            \
                    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {  \
                        set_current_state(TASK_RUNNING);                \
-                       return 0;                                       \
+                       goto out;                                       \
                }                                                       \
                                                                        \
                schedule();                                             \
@@ -378,6 +378,9 @@ retry_invalidate:
                        bch_prio_write(ca);
                }
        }
+out:
+       wait_for_kthread_stop();
+       return 0;
 }

 /* Allocation */
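With this change the allocator thread no longer returns as soon as CACHE_SET_IO_DISABLE is set; it jumps to the new out: label and parks in wait_for_kthread_stop() until its owner calls kthread_stop(), so the task is still around when the teardown path stops it. A userspace analog of that hand-shake using pthreads; the names and polling loops are illustrative, not the kernel implementation:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_bool io_disabled;   /* analog of CACHE_SET_IO_DISABLE */
    static atomic_bool should_stop;   /* analog of kthread_should_stop() */

    static void *allocator(void *arg)
    {
        (void)arg;
        while (!atomic_load(&io_disabled)) {
            /* ... do allocation work ... */
            usleep(1000);
        }
        /* Don't exit yet: park until the owner explicitly asks us to stop. */
        while (!atomic_load(&should_stop))
            usleep(1000);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, allocator, NULL);
        atomic_store(&io_disabled, true);   /* error path: disable I/O ...    */
        atomic_store(&should_stop, true);   /* ... then tear the thread down  */
        pthread_join(tid, NULL);
        puts("allocator stopped in an orderly way");
        return 0;
    }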
@@ -392,6 +392,8 @@ struct cached_dev {
 #define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
        atomic_t                io_errors;
        unsigned                error_limit;
+
+       char                    backing_dev_name[BDEVNAME_SIZE];
 };

 enum alloc_reserve {
@@ -464,6 +466,8 @@ struct cache {
        atomic_long_t           meta_sectors_written;
        atomic_long_t           btree_sectors_written;
        atomic_long_t           sectors_written;
+
+       char                    cache_dev_name[BDEVNAME_SIZE];
 };

 struct gc_stat {
@@ -106,7 +106,6 @@ void bch_btree_verify(struct btree *b)

 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
-       char name[BDEVNAME_SIZE];
        struct bio *check;
        struct bio_vec bv, cbv;
        struct bvec_iter iter, citer = { 0 };
@@ -134,7 +133,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
                                        bv.bv_len),
                                 dc->disk.c,
                                 "verify failed at dev %s sector %llu",
-                                bdevname(dc->bdev, name),
+                                dc->backing_dev_name,
                                 (uint64_t) bio->bi_iter.bi_sector);

                kunmap_atomic(p1);
@@ -52,7 +52,6 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 /* IO errors */
 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
 {
-       char buf[BDEVNAME_SIZE];
        unsigned errors;

        WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
@@ -60,7 +59,7 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
        errors = atomic_add_return(1, &dc->io_errors);
        if (errors < dc->error_limit)
                pr_err("%s: IO error on backing device, unrecoverable",
-                       bio_devname(bio, buf));
+                       dc->backing_dev_name);
        else
                bch_cached_dev_error(dc);
 }
@@ -105,19 +104,18 @@ void bch_count_io_errors(struct cache *ca,
        }

        if (error) {
-               char buf[BDEVNAME_SIZE];
                unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                                                    &ca->io_errors);
                errors >>= IO_ERROR_SHIFT;

                if (errors < ca->set->error_limit)
                        pr_err("%s: IO error on %s%s",
-                              bdevname(ca->bdev, buf), m,
+                              ca->cache_dev_name, m,
                               is_read ? ", recovering." : ".");
                else
                        bch_cache_set_error(ca->set,
                                            "%s: too many IO errors %s",
-                                           bdevname(ca->bdev, buf), m);
+                                           ca->cache_dev_name, m);
        }
 }
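bch_count_io_errors() keeps the cache device's error count as a fixed-point value: each hard error adds 1 << IO_ERROR_SHIFT, the limit check shifts back down to whole errors, and the fractional bits let the same function decay the count gradually (it periodically scales it by 127/128). A small C11 sketch of that counting scheme; the shift and limit values here are arbitrary, not the kernel's:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative values only; the kernel's shift and limits differ. */
    #define IO_ERROR_SHIFT  20
    #define ERROR_LIMIT     8

    static atomic_uint io_errors;  /* fixed point: whole errors live above IO_ERROR_SHIFT */

    static void count_io_error(void)
    {
        unsigned errors = atomic_fetch_add(&io_errors, 1u << IO_ERROR_SHIFT)
                          + (1u << IO_ERROR_SHIFT);

        errors >>= IO_ERROR_SHIFT;  /* back to whole errors for the limit check */
        if (errors < ERROR_LIMIT)
            printf("IO error %u, recovering\n", errors);
        else
            printf("IO error %u, giving up on the device\n", errors);
    }

    static void decay(void)
    {
        /* Scaling by 127/128 only makes progress because of the fractional bits. */
        unsigned errors = atomic_load(&io_errors);

        atomic_store(&io_errors, (unsigned)(((unsigned long long)errors * 127) / 128));
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++) {
            count_io_error();
            decay();
        }
        return 0;
    }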
@@ -649,11 +649,8 @@ static void backing_request_endio(struct bio *bio)
                 */
                if (unlikely(s->iop.writeback &&
                             bio->bi_opf & REQ_PREFLUSH)) {
-                       char buf[BDEVNAME_SIZE];
-
-                       bio_devname(bio, buf);
                        pr_err("Can't flush %s: returned bi_status %i",
-                              buf, bio->bi_status);
+                              dc->backing_dev_name, bio->bi_status);
                } else {
                        /* set to orig_bio->bi_status in bio_complete() */
                        s->iop.status = bio->bi_status;
@@ -936,7 +936,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
 static void cached_dev_detach_finish(struct work_struct *w)
 {
        struct cached_dev *dc = container_of(w, struct cached_dev, detach);
-       char buf[BDEVNAME_SIZE];
        struct closure cl;
        closure_init_stack(&cl);

@@ -967,7 +966,7 @@ static void cached_dev_detach_finish(struct work_struct *w)

        mutex_unlock(&bch_register_lock);

-       pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+       pr_info("Caching disabled for %s", dc->backing_dev_name);

        /* Drop ref we took in cached_dev_detach() */
        closure_put(&dc->disk.cl);
@@ -999,29 +998,28 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 {
        uint32_t rtime = cpu_to_le32(get_seconds());
        struct uuid_entry *u;
-       char buf[BDEVNAME_SIZE];
        struct cached_dev *exist_dc, *t;

-       bdevname(dc->bdev, buf);
-
        if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
            (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
                return -ENOENT;

        if (dc->disk.c) {
-               pr_err("Can't attach %s: already attached", buf);
+               pr_err("Can't attach %s: already attached",
+                      dc->backing_dev_name);
                return -EINVAL;
        }

        if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
-               pr_err("Can't attach %s: shutting down", buf);
+               pr_err("Can't attach %s: shutting down",
+                      dc->backing_dev_name);
                return -EINVAL;
        }

        if (dc->sb.block_size < c->sb.block_size) {
                /* Will die */
                pr_err("Couldn't attach %s: block size less than set's block size",
-                      buf);
+                      dc->backing_dev_name);
                return -EINVAL;
        }

@@ -1029,7 +1027,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
        list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
                if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
                        pr_err("Tried to attach %s but duplicate UUID already attached",
-                               buf);
+                               dc->backing_dev_name);

                        return -EINVAL;
                }
@@ -1047,13 +1045,15 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,

        if (!u) {
                if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
-                       pr_err("Couldn't find uuid for %s in set", buf);
+                       pr_err("Couldn't find uuid for %s in set",
+                              dc->backing_dev_name);
                        return -ENOENT;
                }

                u = uuid_find_empty(c);
                if (!u) {
-                       pr_err("Not caching %s, no room for UUID", buf);
+                       pr_err("Not caching %s, no room for UUID",
+                              dc->backing_dev_name);
                        return -EINVAL;
                }
        }
@@ -1112,7 +1112,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
        up_write(&dc->writeback_lock);

        pr_info("Caching %s as %s on set %pU",
-               bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+               dc->backing_dev_name,
+               dc->disk.disk->disk_name,
                dc->disk.c->sb.set_uuid);
        return 0;
 }
@@ -1225,10 +1226,10 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
                          struct block_device *bdev,
                          struct cached_dev *dc)
 {
-       char name[BDEVNAME_SIZE];
        const char *err = "cannot allocate memory";
        struct cache_set *c;

+       bdevname(bdev, dc->backing_dev_name);
        memcpy(&dc->sb, sb, sizeof(struct cache_sb));
        dc->bdev = bdev;
        dc->bdev->bd_holder = dc;
@@ -1237,6 +1238,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
        bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
        get_page(sb_page);

+
        if (cached_dev_init(dc, sb->block_size << 9))
                goto err;

@@ -1247,7 +1249,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
        if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
                goto err;

-       pr_info("registered backing device %s", bdevname(bdev, name));
+       pr_info("registered backing device %s", dc->backing_dev_name);

        list_add(&dc->list, &uncached_devices);
        list_for_each_entry(c, &bch_cache_sets, list)
@@ -1259,7 +1261,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,

        return;
 err:
-       pr_notice("error %s: %s", bdevname(bdev, name), err);
+       pr_notice("error %s: %s", dc->backing_dev_name, err);
        bcache_device_stop(&dc->disk);
 }

@@ -1367,7 +1369,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)

 bool bch_cached_dev_error(struct cached_dev *dc)
 {
-       char name[BDEVNAME_SIZE];
+       struct cache_set *c;

        if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return false;
@@ -1377,7 +1379,22 @@ bool bch_cached_dev_error(struct cached_dev *dc)
        smp_mb();

        pr_err("stop %s: too many IO errors on backing device %s\n",
-               dc->disk.disk->disk_name, bdevname(dc->bdev, name));
+               dc->disk.disk->disk_name, dc->backing_dev_name);
+
+       /*
+        * If the cached device is still attached to a cache set,
+        * even dc->io_disable is true and no more I/O requests
+        * accepted, cache device internal I/O (writeback scan or
+        * garbage collection) may still prevent bcache device from
+        * being stopped. So here CACHE_SET_IO_DISABLE should be
+        * set to c->flags too, to make the internal I/O to cache
+        * device rejected and stopped immediately.
+        * If c is NULL, that means the bcache device is not attached
+        * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
+        */
+       c = dc->disk.c;
+       if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+               pr_info("CACHE_SET_IO_DISABLE already set");

        bcache_device_stop(&dc->disk);
        return true;
@@ -1395,7 +1412,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
                return false;

        if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
-               pr_warn("CACHE_SET_IO_DISABLE already set");
+               pr_info("CACHE_SET_IO_DISABLE already set");

        /* XXX: we can be called from atomic context
           acquire_console_sem();
@@ -1539,6 +1556,20 @@ static void conditional_stop_bcache_device(struct cache_set *c,
                 */
                pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
                        d->disk->disk_name);
+               /*
+                * There might be a small time gap that cache set is
+                * released but bcache device is not. Inside this time
+                * gap, regular I/O requests will directly go into
+                * backing device as no cache set attached to. This
+                * behavior may also introduce potential inconsistence
+                * data in writeback mode while cache is dirty.
+                * Therefore before calling bcache_device_stop() due
+                * to a broken cache device, dc->io_disable should be
+                * explicitly set to true.
+                */
+               dc->io_disable = true;
+               /* make others know io_disable is true earlier */
+               smp_mb();
                bcache_device_stop(d);
        } else {
                /*
@@ -2003,12 +2034,10 @@ static int cache_alloc(struct cache *ca)
 static int register_cache(struct cache_sb *sb, struct page *sb_page,
                                struct block_device *bdev, struct cache *ca)
 {
-       char name[BDEVNAME_SIZE];
        const char *err = NULL; /* must be set for any error case */
        int ret = 0;

-       bdevname(bdev, name);
-
+       bdevname(bdev, ca->cache_dev_name);
        memcpy(&ca->sb, sb, sizeof(struct cache_sb));
        ca->bdev = bdev;
        ca->bdev->bd_holder = ca;
@@ -2045,14 +2074,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
                goto out;
        }

-       pr_info("registered cache device %s", name);
+       pr_info("registered cache device %s", ca->cache_dev_name);

 out:
        kobject_put(&ca->kobj);

 err:
        if (err)
-               pr_notice("error %s: %s", name, err);
+               pr_notice("error %s: %s", ca->cache_dev_name, err);

        return ret;
 }
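The error paths above share one pattern: set a "no more I/O" flag (dc->io_disable or CACHE_SET_IO_DISABLE), make it globally visible with smp_mb(), and only then call bcache_device_stop(), so request paths that check these flags see them before teardown begins. A userspace C11 analog of that publish-then-stop ordering; the flag and helper names are invented for the sketch:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool io_disable;  /* analog of dc->io_disable / CACHE_SET_IO_DISABLE */

    /* I/O submission path: bail out early once the device is marked failed. */
    static bool submit_io(void)
    {
        if (atomic_load_explicit(&io_disable, memory_order_acquire)) {
            puts("I/O rejected: device disabled");
            return false;
        }
        puts("I/O submitted");
        return true;
    }

    /* Error path: set the flag and make it visible before stopping the device. */
    static void fail_device(void)
    {
        atomic_store_explicit(&io_disable, true, memory_order_release);
        atomic_thread_fence(memory_order_seq_cst);  /* analog of smp_mb()           */
        puts("stopping device");                    /* bcache_device_stop() goes here */
    }

    int main(void)
    {
        submit_io();
        fail_device();
        submit_io();
        return 0;
    }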
@@ -244,8 +244,10 @@ static void dirty_endio(struct bio *bio)
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;

-       if (bio->bi_status)
+       if (bio->bi_status) {
                SET_KEY_DIRTY(&w->key, false);
+               bch_count_backing_io_errors(io->dc, bio);
+       }

        closure_put(&io->cl);
 }
@@ -764,6 +764,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
                                ret = PTR_ERR(meta);
                                goto out_unmap;
                        }
+                       req->cmd_flags |= REQ_INTEGRITY;
                }
        }

@@ -2997,31 +2998,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        if (nvme_init_ns_head(ns, nsid, id))
                goto out_free_id;
        nvme_setup_streams_ns(ctrl, ns);
-
-#ifdef CONFIG_NVME_MULTIPATH
-       /*
-        * If multipathing is enabled we need to always use the subsystem
-        * instance number for numbering our devices to avoid conflicts
-        * between subsystems that have multiple controllers and thus use
-        * the multipath-aware subsystem node and those that have a single
-        * controller and use the controller node directly.
-        */
-       if (ns->head->disk) {
-               sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
-                               ctrl->cntlid, ns->head->instance);
-               flags = GENHD_FL_HIDDEN;
-       } else {
-               sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
-                               ns->head->instance);
-       }
-#else
-       /*
-        * But without the multipath code enabled, multiple controller per
-        * subsystems are visible as devices and thus we cannot use the
-        * subsystem instance.
-        */
-       sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
-#endif
+       nvme_set_disk_name(disk_name, ns, ctrl, &flags);

        if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
                if (nvme_nvm_register(ns, disk_name, node)) {
@@ -668,6 +668,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->transport);
                        opts->transport = p;
                        break;
                case NVMF_OPT_NQN:
@@ -676,6 +677,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->subsysnqn);
                        opts->subsysnqn = p;
                        nqnlen = strlen(opts->subsysnqn);
                        if (nqnlen >= NVMF_NQN_SIZE) {
@@ -698,6 +700,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->traddr);
                        opts->traddr = p;
                        break;
                case NVMF_OPT_TRSVCID:
@@ -706,6 +709,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->trsvcid);
                        opts->trsvcid = p;
                        break;
                case NVMF_OPT_QUEUE_SIZE:
@@ -792,6 +796,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -EINVAL;
                                goto out;
                        }
+                       nvmf_host_put(opts->host);
                        opts->host = nvmf_host_add(p);
                        kfree(p);
                        if (!opts->host) {
@@ -817,6 +822,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->host_traddr);
                        opts->host_traddr = p;
                        break;
                case NVMF_OPT_HOST_ID:
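Each of these hunks fixes the same leak: match_strdup() allocates a fresh string every time an option appears, so when the same option shows up twice on a connect string the earlier allocation was lost as the pointer got overwritten. Freeing (or, for the host, putting) the previous value first makes repeated options safe. A userspace illustration of the pattern, with strdup() standing in for match_strdup():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct opts { char *transport; };

    /* Called once per occurrence of the option, like the fabrics parser. */
    static void set_transport(struct opts *opts, const char *value)
    {
        char *p = strdup(value);

        if (!p)
            return;
        free(opts->transport);  /* the fix: drop any earlier duplicate first */
        opts->transport = p;
    }

    int main(void)
    {
        struct opts opts = { 0 };

        set_transport(&opts, "rdma");
        set_transport(&opts, "loop");   /* same option given twice */
        printf("transport = %s\n", opts.transport);
        free(opts.transport);
        return 0;
    }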
@@ -15,10 +15,32 @@
 #include "nvme.h"

 static bool multipath = true;
-module_param(multipath, bool, 0644);
+module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
        "turn on native support for multiple controllers per subsystem");

+/*
+ * If multipathing is enabled we need to always use the subsystem instance
+ * number for numbering our devices to avoid conflicts between subsystems that
+ * have multiple controllers and thus use the multipath-aware subsystem node
+ * and those that have a single controller and use the controller node
+ * directly.
+ */
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+                       struct nvme_ctrl *ctrl, int *flags)
+{
+       if (!multipath) {
+               sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+       } else if (ns->head->disk) {
+               sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+                               ctrl->cntlid, ns->head->instance);
+               *flags = GENHD_FL_HIDDEN;
+       } else {
+               sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+                               ns->head->instance);
+       }
+}
+
 void nvme_failover_req(struct request *req)
 {
        struct nvme_ns *ns = req->q->queuedata;
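nvme_set_disk_name() centralizes the naming policy that used to be open-coded in nvme_alloc_ns(): without multipathing the controller instance is used, with multipathing the subsystem instance is used, and per-controller nodes behind a shared head get an extra cNN component plus GENHD_FL_HIDDEN. A standalone C sketch of that decision; the structures and instance numbers are made up for illustration:

    #include <stdio.h>

    /* Cut-down stand-ins for the nvme structures, for illustration only. */
    struct ctrl { int instance; int subsys_instance; int cntlid; };
    struct ns   { int head_instance; int head_has_disk; };

    static void set_disk_name(char *name, size_t len, const struct ns *ns,
                              const struct ctrl *ctrl, int multipath)
    {
        if (!multipath)
            snprintf(name, len, "nvme%dn%d", ctrl->instance, ns->head_instance);
        else if (ns->head_has_disk)  /* hidden per-controller path node */
            snprintf(name, len, "nvme%dc%dn%d", ctrl->subsys_instance,
                     ctrl->cntlid, ns->head_instance);
        else
            snprintf(name, len, "nvme%dn%d", ctrl->subsys_instance,
                     ns->head_instance);
    }

    int main(void)
    {
        struct ctrl c = { .instance = 1, .subsys_instance = 0, .cntlid = 2 };
        struct ns n = { .head_instance = 1, .head_has_disk = 1 };
        char name[32];

        set_disk_name(name, sizeof(name), &n, &c, 1);
        printf("multipath, shared head: %s\n", name);  /* nvme0c2n1 */
        set_disk_name(name, sizeof(name), &n, &c, 0);
        printf("multipath off:          %s\n", name);  /* nvme1n1 */
        return 0;
    }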
@@ -436,6 +436,8 @@ extern const struct attribute_group nvme_ns_id_attr_group;
 extern const struct block_device_operations nvme_ns_head_ops;

 #ifdef CONFIG_NVME_MULTIPATH
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+                       struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
 bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -461,6 +463,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 }

 #else
+/*
+ * Without the multipath code enabled, multiple controller per subsystems are
+ * visible as devices and thus we cannot use the subsystem instance.
+ */
+static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+                                     struct nvme_ctrl *ctrl, int *flags)
+{
+       sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+}
+
 static inline void nvme_failover_req(struct request *req)
 {
 }
@@ -469,6 +469,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);

+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+               /* state change failure should never happen */
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_disable;
@@ -1961,7 +1961,7 @@ void wb_workfn(struct work_struct *work)
        }

        if (!list_empty(&wb->work_list))
-               mod_delayed_work(bdi_wq, &wb->dwork, 0);
+               wb_wakeup(wb);
        else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
                wb_wakeup_delayed(wb);

@@ -368,7 +368,9 @@ static inline void free_part_stats(struct hd_struct *part)
        part_stat_add(cpu, gendiskp, field, -subnd)

 void part_in_flight(struct request_queue *q, struct hd_struct *part,
-                   unsigned int inflight[2]);
+                   unsigned int inflight[2]);
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                      unsigned int inflight[2]);
 void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
                        int rw);
 void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -305,4 +305,21 @@ do { \
        __ret;                                                          \
 })

+/**
+ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
+ *
+ * @bit: the bit of the word being waited on
+ * @word: the word being waited on, a kernel virtual address
+ *
+ * You can use this helper if bitflags are manipulated atomically rather than
+ * non-atomically under a lock.
+ */
+static inline void clear_and_wake_up_bit(int bit, void *word)
+{
+       clear_bit_unlock(bit, word);
+       /* See wake_up_bit() for which memory barrier you need to use. */
+       smp_mb__after_atomic();
+       wake_up_bit(word, bit);
+}
+
 #endif /* _LINUX_WAIT_BIT_H */
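clear_and_wake_up_bit() bundles the clear, the memory barrier, and the wake-up so a task sleeping in wait_on_bit() cannot miss the transition; the wb_shutdown() change further down relies on exactly that to wake concurrent callers. A userspace analog with C11 atomics and a condition variable; the kernel primitive uses no mutex, this only models the clear-then-wake ordering:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool shutting_down = true;  /* analog of WB_shutting_down */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

    /* Analog of wait_on_bit(): sleep until the flag is observed clear. */
    static void *waiter(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (atomic_load(&shutting_down))
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        puts("waiter: shutdown finished, proceeding");
        return NULL;
    }

    /* Analog of clear_and_wake_up_bit(): clear with release semantics, then wake. */
    static void clear_and_wake(void)
    {
        pthread_mutex_lock(&lock);
        atomic_store_explicit(&shutting_down, false, memory_order_release);
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, waiter, NULL);
        clear_and_wake();
        pthread_join(tid, NULL);
        return 0;
    }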
@@ -115,6 +115,7 @@ static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
                                               bdi, &bdi_debug_stats_fops);
        if (!bdi->debug_stats) {
                debugfs_remove(bdi->debug_dir);
+               bdi->debug_dir = NULL;
                return -ENOMEM;
        }

@@ -383,7 +384,7 @@ static void wb_shutdown(struct bdi_writeback *wb)
         * the barrier provided by test_and_clear_bit() above.
         */
        smp_wmb();
-       clear_bit(WB_shutting_down, &wb->state);
+       clear_and_wake_up_bit(WB_shutting_down, &wb->state);
 }

 static void wb_exit(struct bdi_writeback *wb)
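The bdi_debug_register() error path now resets bdi->debug_dir to NULL after removing it, so the later unconditional cleanup in bdi_debug_unregister() becomes a no-op instead of touching an already-removed dentry (debugfs_remove() ignores a NULL pointer). The same defensive pattern in plain C, with a hypothetical release helper:

    #include <stdio.h>
    #include <stdlib.h>

    struct debug_state { void *debug_dir; };

    /* Hypothetical helper: releasing NULL is defined as a no-op, like debugfs_remove(). */
    static void release_dir(void *dir)
    {
        if (!dir)
            return;
        printf("releasing %p\n", dir);
        free(dir);
    }

    static void cleanup(struct debug_state *s)
    {
        release_dir(s->debug_dir);
        s->debug_dir = NULL;  /* make any later cleanup a harmless no-op */
    }

    int main(void)
    {
        struct debug_state s = { .debug_dir = malloc(16) };

        cleanup(&s);  /* error path */
        cleanup(&s);  /* later teardown: safe, pointer already NULL */
        return 0;
    }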