block-5.11-2021-01-10
Merge tag 'block-5.11-2021-01-10' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Missing CRC32 selections (Arnd)
 - Fix for a merge window regression with bdev inode init (Christoph)
 - bcache fixes
 - rnbd fixes
 - NVMe pull request from Christoph:
      - fix a race in the nvme-tcp send code (Sagi Grimberg)
      - fix a list corruption in an nvme-rdma error path (Israel Rukshin)
      - avoid a possible double fetch in nvme-pci (Lalithambika Krishnakumar)
      - add the subsystem NQN quirk for a Samsung drive (Gopal Tiwari)
      - fix two compiler warnings in nvme-fcloop (James Smart)
      - don't call sleeping functions from irq context in nvme-fc (James Smart)
      - remove an unused argument (Max Gurtovoy)
      - remove unused exports (Minwoo Im)
 - Use-after-free fix for partition iteration (Ming)
 - Missing blk-mq debugfs flag annotation (John)
 - Bdev freeze regression fix (Satya)
 - blk-iocost NULL pointer deref fix (Tejun)

* tag 'block-5.11-2021-01-10' of git://git.kernel.dk/linux-block: (26 commits)
  bcache: set bcache device into read-only mode for BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET
  bcache: introduce BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE for large bucket
  bcache: check unsupported feature sets for bcache register
  bcache: fix typo from SUUP to SUPP in features.h
  bcache: set pdev_set_uuid before scond loop iteration
  blk-mq-debugfs: Add decode for BLK_MQ_F_TAG_HCTX_SHARED
  block/rnbd-clt: avoid module unload race with close confirmation
  block/rnbd: Adding name to the Contributors List
  block/rnbd-clt: Fix sg table use after free
  block/rnbd-srv: Fix use after free in rnbd_srv_sess_dev_force_close
  block/rnbd: Select SG_POOL for RNBD_CLIENT
  block: pre-initialize struct block_device in bdev_alloc_inode
  fs: Fix freeze_bdev()/thaw_bdev() accounting of bd_fsfreeze_sb
  nvme: remove the unused status argument from nvme_trace_bio_complete
  nvmet-rdma: Fix list_del corruption on queue establishment failure
  nvme: unexport functions with no external caller
  nvme: avoid possible double fetch in handling CQE
  nvme-tcp: Fix possible race of io_work and direct send
  nvme-pci: mark Samsung PM1725a as IGNORE_DEV_SUBNQN
  nvme-fcloop: Fix sscanf type and list_first_entry_or_null warnings
  ...
commit ed41fd071c
@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 	 * limit 'something'.
 	 */
 	/* no more than 50% of tags for async I/O */
-	bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
+	bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
 	/*
 	 * no more than 75% of tags for sync writes (25% extra tags
 	 * w.r.t. async I/O, to prevent async I/O from starving sync
 	 * writes)
 	 */
-	bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
+	bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
 
 	/*
 	 * In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 	 * shortage.
 	 */
 	/* no more than ~18% of tags for async I/O */
-	bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
+	bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
 	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
+	bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
 
 	for (i = 0; i < 2; i++)
 		for (j = 0; j < 2; j++)
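The two hunks above change the base of BFQ's shallow-depth percentages from (1U << bt->sb.shift), the number of bits in a single sbitmap word, to bt->sb.depth, the total number of tags. A minimal userspace sketch of the arithmetic follows; the depth and shift values are made-up examples, not taken from the kernel, and the max(..., 1U) clamping is omitted for brevity.

#include <stdio.h>

int main(void)
{
	unsigned int depth = 256;	/* bt->sb.depth: total tags (assumed) */
	unsigned int shift = 6;		/* bt->sb.shift: log2(bits per word) (assumed) */

	unsigned int old_async = (1U << shift) >> 1;	/* 50% of one word */
	unsigned int new_async = depth >> 1;		/* 50% of all tags */

	unsigned int old_sync_write = ((1U << shift) * 3) >> 2;
	unsigned int new_sync_write = (depth * 3) >> 2;

	printf("async      limit: old=%u new=%u\n", old_async, new_async);
	printf("sync-write limit: old=%u new=%u\n", old_sync_write, new_sync_write);
	return 0;
}

With these (assumed) numbers the old formula caps async I/O at 32 of 256 tags instead of the intended 128, which is the under-sizing the fix removes.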
@@ -2551,8 +2551,8 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 	bool use_debt, ioc_locked;
 	unsigned long flags;
 
-	/* bypass IOs if disabled or for root cgroup */
-	if (!ioc->enabled || !iocg->level)
+	/* bypass IOs if disabled, still initializing, or for root cgroup */
+	if (!ioc->enabled || !iocg || !iocg->level)
 		return;
 
 	/* calculate the absolute vtime cost */
@@ -2679,14 +2679,14 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
 					struct bio *bio)
 {
 	struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
-	struct ioc *ioc = iocg->ioc;
+	struct ioc *ioc = rqos_to_ioc(rqos);
 	sector_t bio_end = bio_end_sector(bio);
 	struct ioc_now now;
 	u64 vtime, abs_cost, cost;
 	unsigned long flags;
 
-	/* bypass if disabled or for root cgroup */
-	if (!ioc->enabled || !iocg->level)
+	/* bypass if disabled, still initializing, or for root cgroup */
+	if (!ioc->enabled || !iocg || !iocg->level)
 		return;
 
 	abs_cost = calc_vtime_cost(bio, iocg, true);
@@ -2863,6 +2863,12 @@ static int blk_iocost_init(struct request_queue *q)
 	ioc_refresh_params(ioc, true);
 	spin_unlock_irq(&ioc->lock);
 
+	/*
+	 * rqos must be added before activation to allow iocg_pd_init() to
+	 * lookup the ioc from q. This means that the rqos methods may get
+	 * called before policy activation completion, can't assume that the
+	 * target bio has an iocg associated and need to test for NULL iocg.
+	 */
 	rq_qos_add(q, rqos);
 	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
 	if (ret) {
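The iocost hunks above guard against a NULL iocg: as the new comment in blk_iocost_init() explains, the rq_qos hooks can run after rq_qos_add() but before blkcg policy activation finishes, so a bio may not have per-cgroup state attached yet. Below is a hedged, self-contained C sketch of the same "bypass while still initializing" pattern; the struct and function names are invented for illustration and are not the kernel's.

#include <stdio.h>

/* Toy stand-ins for ioc / iocg; names and fields are illustrative only. */
struct toy_group_state { int level; };
struct toy_controller {
	int enabled;
	struct toy_group_state *per_group;	/* NULL until "activation" */
};

/* Hook that may be called before activation has attached per-group state. */
static void throttle_hook(struct toy_controller *c)
{
	struct toy_group_state *gs = c->per_group;	/* may be NULL early on */

	/* bypass if disabled, still initializing, or for the root group */
	if (!c->enabled || !gs || !gs->level)
		return;

	printf("throttling at level %d\n", gs->level);
}

int main(void)
{
	struct toy_controller c = { .enabled = 1, .per_group = NULL };
	struct toy_group_state gs = { .level = 2 };

	throttle_hook(&c);	/* early call: safely bypassed */
	c.per_group = &gs;	/* "policy activation" completes */
	throttle_hook(&c);	/* now the real work runs */
	return 0;
}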
@@ -246,6 +246,7 @@ static const char *const hctx_flag_name[] = {
 	HCTX_FLAG_NAME(BLOCKING),
 	HCTX_FLAG_NAME(NO_SCHED),
 	HCTX_FLAG_NAME(STACKING),
+	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
 };
 #undef HCTX_FLAG_NAME
 
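The hunk above adds TAG_HCTX_SHARED to the hctx_flag_name[] table so the flag is decoded by name in debugfs instead of being dropped. The sketch below mimics that decode-table pattern in plain C; the bit positions are illustrative assumptions, not the kernel's BLK_MQ_F_* values.

#include <stdio.h>

struct flag_name {
	unsigned int mask;
	const char *name;
};

/* Toy version of a flag decode table; masks are made up for the example. */
static const struct flag_name hctx_flags[] = {
	{ 1u << 0, "SHOULD_MERGE" },
	{ 1u << 5, "BLOCKING" },
	{ 1u << 6, "NO_SCHED" },
	{ 1u << 7, "STACKING" },
	{ 1u << 8, "TAG_HCTX_SHARED" },	/* the newly decoded flag */
};

static void decode_flags(unsigned int flags)
{
	size_t i;

	for (i = 0; i < sizeof(hctx_flags) / sizeof(hctx_flags[0]); i++)
		if (flags & hctx_flags[i].mask)
			printf("%s ", hctx_flags[i].name);
	/* bits without a table entry are simply not printed */
	printf("\n");
}

int main(void)
{
	decode_flags((1u << 0) | (1u << 8));
	return 0;
}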
@@ -246,15 +246,18 @@ struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
 		part = rcu_dereference(ptbl->part[piter->idx]);
 		if (!part)
 			continue;
-		if (!bdev_nr_sectors(part) &&
-		    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
-		    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
-		      piter->idx == 0))
-			continue;
-
 		piter->part = bdgrab(part);
 		if (!piter->part)
 			continue;
+		if (!bdev_nr_sectors(part) &&
+		    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
+		    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
+		      piter->idx == 0)) {
+			bdput(piter->part);
+			piter->part = NULL;
+			continue;
+		}
+
 		piter->idx += inc;
 		break;
 	}
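The disk_part_iter_next() hunk above fixes a use-after-free by moving the "skip empty partitions" test after bdgrab(), so the partition is only inspected while a reference is held, and by dropping that reference with bdput() when the partition is skipped. A toy userspace sketch of the same grab-inspect-release ordering, with invented names and a plain integer refcount:

#include <stdio.h>

struct part {
	int refcount;
	long nr_sectors;
};

static struct part *part_grab(struct part *p)
{
	if (!p)
		return NULL;
	p->refcount++;
	return p;
}

static void part_put(struct part *p)
{
	p->refcount--;
}

/* Return the first non-empty entry with a reference held, or NULL. */
static struct part *next_nonempty(struct part **tbl, int n)
{
	for (int i = 0; i < n; i++) {
		struct part *p = part_grab(tbl[i]);

		if (!p)
			continue;
		/* inspect only while holding the reference ... */
		if (!p->nr_sectors) {
			part_put(p);	/* ... and drop it when skipping */
			continue;
		}
		return p;
	}
	return NULL;
}

int main(void)
{
	struct part a = { .nr_sectors = 0 }, b = { .nr_sectors = 2048 };
	struct part *tbl[] = { &a, NULL, &b };
	struct part *p = next_nonempty(tbl, 3);

	if (p)
		printf("found partition with %ld sectors (refcount %d)\n",
		       p->nr_sectors, p->refcount);
	return 0;
}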
@@ -445,6 +445,7 @@ config BLK_DEV_RBD
 config BLK_DEV_RSXX
 	tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
 	depends on PCI
+	select CRC32
 	help
 	  Device driver for IBM's high speed PCIe SSD
 	  storage device: Flash Adapter 900GB Full Height.
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
 	tristate "RDMA Network Block Device driver client"
 	depends on INFINIBAND_RTRS_CLIENT
 	select BLK_DEV_RNBD
+	select SG_POOL
 	help
 	  RNBD client is a network block device driver using rdma transport.
 
@@ -90,3 +90,4 @@ Kleber Souza <kleber.souza@profitbricks.com>
 Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
 Milind Dumbare <Milind.dumbare@gmail.com>
 Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
@@ -375,12 +375,19 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
 	init_waitqueue_head(&iu->comp.wait);
 	iu->comp.errno = INT_MAX;
 
+	if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+		rnbd_put_permit(sess, permit);
+		kfree(iu);
+		return NULL;
+	}
+
 	return iu;
 }
 
 static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
 {
 	if (atomic_dec_and_test(&iu->refcount)) {
+		sg_free_table(&iu->sgt);
 		rnbd_put_permit(sess, iu->permit);
 		kfree(iu);
 	}
@@ -487,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
 	iu->buf = NULL;
 	iu->dev = dev;
 
-	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
-
 	msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
 	msg.device_id = cpu_to_le32(device_id);
 
@@ -502,7 +507,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
 		err = errno;
 	}
 
-	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
@@ -575,7 +579,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
 	iu->buf = rsp;
 	iu->dev = dev;
 
-	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
 	sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
 	msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
@@ -594,7 +597,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
 		err = errno;
 	}
 
-	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
@@ -622,8 +624,6 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 
 	iu->buf = rsp;
 	iu->sess = sess;
-
-	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
 	sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
 	msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
@@ -650,7 +650,6 @@ put_iu:
 	} else {
 		err = errno;
 	}
-	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
@@ -1698,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
 	 */
 
 	list_for_each_entry_safe(sess, sn, &sess_list, list) {
-		WARN_ON(!rnbd_clt_get_sess(sess));
+		if (!rnbd_clt_get_sess(sess))
+			continue;
 		close_rtrs(sess);
 		list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
 			/*
@@ -338,10 +338,12 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
 
 void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
 {
-	mutex_lock(&sess_dev->sess->lock);
-	rnbd_srv_destroy_dev_session_sysfs(sess_dev);
-	mutex_unlock(&sess_dev->sess->lock);
+	struct rnbd_srv_session	*sess = sess_dev->sess;
+
 	sess_dev->keep_id = true;
+	mutex_lock(&sess->lock);
+	rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+	mutex_unlock(&sess->lock);
 }
 
 static int process_msg_close(struct rtrs_srv *rtrs,
@@ -19,6 +19,7 @@ if NVM
 
 config NVM_PBLK
 	tristate "Physical Block Device Open-Channel SSD target"
+	select CRC32
 	help
 	  Allows an open-channel SSD to be exposed as a block device to the
 	  host. The target assumes the device exposes raw flash and must be
@@ -17,7 +17,7 @@ struct feature {
 };
 
 static struct feature feature_list[] = {
-	{BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+	{BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
 	 "large_bucket"},
 	{0, 0, 0 },
 };
@@ -13,11 +13,15 @@
 
 /* Feature set definition */
 /* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET	0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET		0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE	0x0002
 
-#define BCH_FEATURE_COMPAT_SUUP		0
-#define BCH_FEATURE_RO_COMPAT_SUUP	0
-#define BCH_FEATURE_INCOMPAT_SUUP	BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP		0
+#define BCH_FEATURE_RO_COMPAT_SUPP	0
+#define BCH_FEATURE_INCOMPAT_SUPP	(BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+					 BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
 
 #define BCH_HAS_COMPAT_FEATURE(sb, mask) \
 		((sb)->feature_compat & (mask))
@@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
 		~BCH##_FEATURE_INCOMPAT_##flagname; \
 }
 
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
 
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+	return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+	return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+	return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
+
 int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
 int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
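features.h now defines *_SUPP masks plus bch_has_unknown_*_features() helpers, which a later hunk uses in read_super() to refuse superblocks that advertise feature bits this kernel does not know about. A small self-contained sketch of the same mask check, with toy constants standing in for the real feature bits:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the feature words and the SUPP mask. */
#define TOY_INCOMPAT_OBSO_LARGE_BUCKET		0x0001u
#define TOY_INCOMPAT_LOG_LARGE_BUCKET_SIZE	0x0002u
#define TOY_INCOMPAT_SUPP	(TOY_INCOMPAT_OBSO_LARGE_BUCKET | \
				 TOY_INCOMPAT_LOG_LARGE_BUCKET_SIZE)

struct toy_sb {
	uint64_t feature_incompat;
};

static int has_unknown_incompat_features(const struct toy_sb *sb)
{
	/* any bit outside the supported mask means "do not register" */
	return (sb->feature_incompat & ~(uint64_t)TOY_INCOMPAT_SUPP) != 0;
}

int main(void)
{
	struct toy_sb ok  = { .feature_incompat = TOY_INCOMPAT_LOG_LARGE_BUCKET_SIZE };
	struct toy_sb bad = { .feature_incompat = 0x0004 };	/* unknown bit */

	printf("ok:  %s\n", has_unknown_incompat_features(&ok) ? "reject" : "register");
	printf("bad: %s\n", has_unknown_incompat_features(&bad) ? "reject" : "register");
	return 0;
}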
@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
 {
 	unsigned int bucket_size = le16_to_cpu(s->bucket_size);
 
-	if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
-	    bch_has_feature_large_bucket(sb))
-		bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+	if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+		if (bch_has_feature_large_bucket(sb)) {
+			unsigned int max, order;
+
+			max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+			order = le16_to_cpu(s->bucket_size);
+			/*
+			 * bcache tool will make sure the overflow won't
+			 * happen, an error message here is enough.
+			 */
+			if (order > max)
+				pr_err("Bucket size (1 << %u) overflows\n",
+					order);
+			bucket_size = 1 << order;
+		} else if (bch_has_feature_obso_large_bucket(sb)) {
+			bucket_size +=
+				le16_to_cpu(s->obso_bucket_size_hi) << 16;
+		}
+	}
 
 	return bucket_size;
 }
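With BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE the on-disk field carries log2 of the bucket size, so get_bucket_size() above reconstructs it as 1 << order and only warns if the order cannot fit in an unsigned int (the bcache userspace tool is expected to prevent that case). A hedged userspace sketch of the decode, with made-up input values:

#include <stdio.h>
#include <limits.h>

static unsigned int decode_bucket_size(unsigned int order)
{
	unsigned int max = sizeof(unsigned int) * CHAR_BIT - 1;

	/* the userspace tool should prevent this; just report it */
	if (order > max)
		fprintf(stderr, "Bucket size (1 << %u) overflows\n", order);

	return 1u << order;
}

int main(void)
{
	printf("order 9  -> %u\n", decode_bucket_size(9));
	printf("order 20 -> %u\n", decode_bucket_size(20));
	return 0;
}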
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
 	sb->feature_compat = le64_to_cpu(s->feature_compat);
 	sb->feature_incompat = le64_to_cpu(s->feature_incompat);
 	sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+	/* Check incompatible features */
+	err = "Unsupported compatible feature found";
+	if (bch_has_unknown_compat_features(sb))
+		goto err;
+
+	err = "Unsupported read-only compatible feature found";
+	if (bch_has_unknown_ro_compat_features(sb))
+		goto err;
+
+	err = "Unsupported incompatible feature found";
+	if (bch_has_unknown_incompat_features(sb))
+		goto err;
+
 	err = read_super_common(sb, bdev, s);
 	if (err)
 		goto err;
@@ -1302,6 +1332,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	bcache_device_link(&dc->disk, c, "bdev");
 	atomic_inc(&c->attached_dev_nr);
 
+	if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+		pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+		pr_err("Please update to the latest bcache-tools to create the cache device\n");
+		set_disk_ro(dc->disk.disk, 1);
+	}
+
 	/* Allow the writeback thread to proceed */
 	up_write(&dc->writeback_lock);
 
@@ -1524,6 +1560,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
 
 	bcache_device_link(d, c, "volume");
 
+	if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+		pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+		pr_err("Please update to the latest bcache-tools to create the cache device\n");
+		set_disk_ro(d->disk, 1);
+	}
+
 	return 0;
 err:
 	kobject_put(&d->kobj);
@@ -2083,6 +2125,9 @@ static int run_cache_set(struct cache_set *c)
 	c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
 	bcache_write_super(c);
 
+	if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+		pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
 	list_for_each_entry_safe(dc, t, &uncached_devices, list)
 		bch_cached_dev_attach(dc, c, NULL);
 
@@ -2644,8 +2689,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
 	}
 
 	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+		char *pdev_set_uuid = pdev->dc->sb.set_uuid;
 		list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-			char *pdev_set_uuid = pdev->dc->sb.set_uuid;
 			char *set_uuid = c->set_uuid;
 
 			if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
 
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 {
 	int ret;
 
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
 
 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
 		req->__sector = nvme_lba_to_sect(req->q->queuedata,
 			le64_to_cpu(nvme_req(req)->result.u64));
 
-	nvme_trace_bio_complete(req, status);
+	nvme_trace_bio_complete(req);
 	blk_mq_end_request(req, status);
 }
 
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
 {
 	struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
 	nvme_init_request(req, cmd);
 	return req;
 }
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	admin_tag_set;
 	struct blk_mq_tag_set	tag_set;
 
+	struct work_struct	ioerr_work;
 	struct delayed_work	connect_work;
 
 	struct kref		ref;
@@ -1888,6 +1889,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
 	}
 }
 
+static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+	struct nvme_fc_ctrl *ctrl =
+		container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+	nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
 static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
@@ -2046,7 +2056,7 @@ done:
 
 check_error:
 	if (terminate_assoc)
-		nvme_fc_error_recovery(ctrl, "transport detected io error");
+		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
 static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+	cancel_work_sync(&ctrl->ioerr_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 	/*
 	 * kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+	INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 fail_ctrl:
 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+	cancel_work_sync(&ctrl->ioerr_work);
 	cancel_work_sync(&ctrl->ctrl.reset_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 		kblockd_schedule_work(&head->requeue_work);
 }
 
-static inline void nvme_trace_bio_complete(struct request *req,
-	blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;
 
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 {
 }
-static inline void nvme_trace_bio_complete(struct request *req,
-	blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 }
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
@@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
 	struct nvme_completion *cqe = &nvmeq->cqes[idx];
+	__u16 command_id = READ_ONCE(cqe->command_id);
 	struct request *req;
 
 	/*
@@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
 		nvme_complete_async_event(&nvmeq->dev->ctrl,
 				cqe->status, &cqe->result);
 		return;
 	}
 
-	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
 	if (unlikely(!req)) {
 		dev_warn(nvmeq->dev->ctrl.device,
 			"invalid id %d completed on queue %d\n",
-			cqe->command_id, le16_to_cpu(cqe->sq_id));
+			command_id, le16_to_cpu(cqe->sq_id));
 		return;
 	}
 
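The nvme_handle_cqe() hunks above snapshot cqe->command_id once with READ_ONCE(). The completion entry lives in memory the controller writes, so re-reading the field for the AEN check, the tag lookup, and the warning could observe different values (a double fetch). A minimal userspace analogue, using a volatile-qualified pointer to force a single explicit read into a local; the structure layout and the AEN id range are invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Toy completion entry in memory another agent may update concurrently. */
struct toy_cqe {
	uint16_t command_id;
	uint16_t status;
};

static void handle_cqe(const volatile struct toy_cqe *cqe)
{
	/*
	 * Snapshot the device-owned field once and use the local copy
	 * everywhere, so the checks below cannot see two different values.
	 */
	uint16_t command_id = cqe->command_id;

	if (command_id >= 0xfff0) {	/* pretend: async event ids */
		printf("async event, id %u\n", (unsigned int)command_id);
		return;
	}
	printf("complete request with tag %u\n", (unsigned int)command_id);
}

int main(void)
{
	struct toy_cqe cqe = { .command_id = 42, .status = 0 };

	handle_cqe(&cqe);
	return 0;
}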
@@ -3196,7 +3197,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
-		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
 		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
 	}
 }
 
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+	int ret;
+
+	/* drain the send queue as much as we can... */
+	do {
+		ret = nvme_tcp_try_send(queue);
+	} while (ret > 0);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -279,7 +289,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	if (queue->io_cpu == smp_processor_id() &&
 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
 		queue->more_requests = !last;
-		nvme_tcp_try_send(queue);
+		nvme_tcp_send_all(queue);
 		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
 	} else if (last) {
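The nvme-tcp hunks above replace the single nvme_tcp_try_send() call in the direct-send path with nvme_tcp_send_all(), which keeps calling try_send until it stops making progress, so requests queued by another context while the send_mutex was held are not left stranded. A toy single-threaded sketch of that drain loop (no real networking or locking; the names are invented):

#include <stdio.h>

static int pending = 3;	/* toy send queue depth */

static int try_send(void)
{
	if (pending == 0)
		return 0;	/* nothing (more) to do */
	pending--;
	printf("sent one request, %d left\n", pending);
	return 1;		/* made progress */
}

/* Drain the send queue as much as we can, mirroring nvme_tcp_send_all(). */
static void send_all(void)
{
	int ret;

	do {
		ret = try_send();
	} while (ret > 0);
}

int main(void)
{
	send_all();
	return 0;
}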
@@ -1501,7 +1501,8 @@ static ssize_t
 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	int opcode, starting, amount;
+	unsigned int opcode;
+	int starting, amount;
 
 	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
 		return -EBADRQC;
@@ -1588,8 +1589,8 @@ out_destroy_class:
 
 static void __exit fcloop_exit(void)
 {
-	struct fcloop_lport *lport;
-	struct fcloop_nport *nport;
+	struct fcloop_lport *lport = NULL;
+	struct fcloop_nport *nport = NULL;
 	struct fcloop_tport *tport;
 	struct fcloop_rport *rport;
 	unsigned long flags;
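In the first fcloop hunk above, opcode becomes unsigned int because the %x conversion of sscanf() stores through an unsigned int pointer; passing an int * provokes a format warning. A small standalone example of the corrected usage (the input string is made up):

#include <stdio.h>

int main(void)
{
	const char *buf = "6a:4:20";
	unsigned int opcode;	/* %x expects unsigned int *, not int * */
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return 1;

	printf("opcode %#x, starting %d, amount %d\n", opcode, starting, amount);
	return 0;
}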
@@ -1641,6 +1641,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 	spin_lock_irqsave(&queue->state_lock, flags);
 	switch (queue->state) {
 	case NVMET_RDMA_Q_CONNECTING:
+		while (!list_empty(&queue->rsp_wait_list)) {
+			struct nvmet_rdma_rsp *rsp;
+
+			rsp = list_first_entry(&queue->rsp_wait_list,
+					       struct nvmet_rdma_rsp,
+					       wait_list);
+			list_del(&rsp->wait_list);
+			nvmet_rdma_put_rsp(rsp);
+		}
+		fallthrough;
 	case NVMET_RDMA_Q_LIVE:
 		queue->state = NVMET_RDMA_Q_DISCONNECTING;
 		disconnect = true;
@@ -605,6 +605,8 @@ int thaw_bdev(struct block_device *bdev)
 	error = thaw_super(sb);
 	if (error)
 		bdev->bd_fsfreeze_count++;
+	else
+		bdev->bd_fsfreeze_sb = NULL;
 out:
 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
 	return error;
@@ -774,8 +776,11 @@ static struct kmem_cache * bdev_cachep __read_mostly;
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
 	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
+
 	if (!ei)
 		return NULL;
+	memset(&ei->bdev, 0, sizeof(ei->bdev));
+	ei->bdev.bd_bdi = &noop_backing_dev_info;
 	return &ei->vfs_inode;
 }
 
@@ -869,14 +874,12 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
 	mapping_set_gfp_mask(&inode->i_data, GFP_USER);
 
 	bdev = I_BDEV(inode);
-	memset(bdev, 0, sizeof(*bdev));
 	mutex_init(&bdev->bd_mutex);
 	mutex_init(&bdev->bd_fsfreeze_mutex);
 	spin_lock_init(&bdev->bd_size_lock);
 	bdev->bd_disk = disk;
 	bdev->bd_partno = partno;
 	bdev->bd_inode = inode;
-	bdev->bd_bdi = &noop_backing_dev_info;
 #ifdef CONFIG_SYSFS
 	INIT_LIST_HEAD(&bdev->bd_holder_disks);
 #endif
@@ -213,7 +213,7 @@ struct cache_sb_disk {
 			__le16		keys;
 		};
 	__le64			d[SB_JOURNAL_BUCKETS];	/* journal buckets */
-	__le16			bucket_size_hi;
+	__le16			obso_bucket_size_hi;	/* obsoleted */
 };
 
 /*