for-linus-20180830
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAluITzAQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpnlzD/0bDvKP73KLRJhqYQSKeRU98gcZmr6FglsH
U/XohBRTu0q/5KEVru4YC/44XLaUzK/WJ+mq/IGPqiCmH9YF2nqgD56WN+KL7hCe
4YhmpyuwoR0iyTpY1qnKJwkS7ymd1IQCWW83c3dQ3vWeturGNg0X2ueuSTl2N+8N
2g+6/M80fVycHCBT8ewvSihDMLwfPVdwMyg8xVzSCclO9MLGN714ag9NDM7aN9vf
QHu8vdRPtIwj/0ZQ8ttLTF/2k3t6CUHzvbN/9OWQ+8gFPF/ASop87Dg3P1DBmkj3
RFrlg0QzMzJyBeRtmUlT83Cka7KzOONscJyZPTxJwZrudtgP+xye6ArOP0oKePyn
9HGCcqsnIY05mifx9LXxWRdG1R2M7av47V5qAs9wJwP1bwijhpLXErs/6k8gJTCX
rr0/5AirAJbRG73P0wkU0aiaTZVIyIS5f9TLpNJZ6EAnRnaE9R1t4gAzDLl4t4Jg
iGKz8GKlzWapdU00kEs4Jq2wpA39HAn5cClsbbOaPFVoKQzZfzE9+hQpvx6xfCxP
K07ky2JhoelNwqfOQ7EiTuBSv+jeV2TheUhu27rC3IHwq+kFk2nzzM9xvk+mXO5I
B7v+yqBjhcJiO2799WwIlpkPkQR1vZJFmMe8HEw7QaX+B5jncdhyjP/BguntWgMT
LCQb4x5trQ==
=bnq/
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20180830' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Small collection of fixes that should go into this series. This pull
  contains:

   - NVMe pull request with three small fixes (via Christoph)

   - Kill useless NULL check before kmem_cache_destroy (Chengguang Xu)

   - Xen block driver pull request with persistent grant flushing fixes
     (Juergen Gross)

   - Final wbt fixes, wrapping up the changes for this series. These
     have been heavily tested (me)

   - cdrom info leak fix (Scott Bauer)

   - ATA dma quirk for SQ201 (Linus Walleij)

   - Straightforward bsg refcount_t conversion (John Pittman)"

* tag 'for-linus-20180830' of git://git.kernel.dk/linux-block:
  cdrom: Fix info leak/OOB read in cdrom_ioctl_drive_status
  nvmet: free workqueue object if module init fails
  nvme-fcloop: Fix dropped LS's to removed target port
  nvme-pci: add a memory barrier to nvme_dbbuf_update_and_check_event
  block: bsg: move atomic_t ref_count variable to refcount API
  block: remove unnecessary condition check
  ata: ftide010: Add a quirk for SQ201
  blk-wbt: remove dead code
  blk-wbt: improve waking of tasks
  blk-wbt: abstract out end IO completion handler
  xen/blkback: remove unused pers_gnts_lock from struct xen_blkif_ring
  xen/blkback: move persistent grants flags to bool
  xen/blkfront: reorder tests in xlblk_init()
  xen/blkfront: cleanup stale persistent grants
  xen/blkback: don't keep persistent grants too long
commit fb64638566
--- a/Documentation/ABI/testing/sysfs-driver-xen-blkback
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback
@@ -15,3 +15,13 @@ Description:
                 blkback. If the frontend tries to use more than
                 max_persistent_grants, the LRU kicks in and starts
                 removing 5% of max_persistent_grants every 100ms.
+
+What:           /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds
+Date:           August 2018
+KernelVersion:  4.19
+Contact:        Roger Pau Monné <roger.pau@citrix.com>
+Description:
+                How long a persistent grant is allowed to remain
+                allocated without being in use. The time is in
+                seconds, 0 means indefinitely long.
+                The default is 60 seconds.
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb)
 	}
 }
 
-static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
+			 enum wbt_flags wb_acct)
 {
-	struct rq_wb *rwb = RQWB(rqos);
-	struct rq_wait *rqw;
 	int inflight, limit;
 
-	if (!(wb_acct & WBT_TRACKED))
-		return;
-
-	rqw = get_rq_wait(rwb, wb_acct);
 	inflight = atomic_dec_return(&rqw->inflight);
 
 	/*
@@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
 		int diff = limit - inflight;
 
 		if (!inflight || diff >= rwb->wb_background / 2)
-			wake_up(&rqw->wait);
+			wake_up_all(&rqw->wait);
 	}
 }
 
+static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+{
+	struct rq_wb *rwb = RQWB(rqos);
+	struct rq_wait *rqw;
+
+	if (!(wb_acct & WBT_TRACKED))
+		return;
+
+	rqw = get_rq_wait(rwb, wb_acct);
+	wbt_rqw_done(rwb, rqw, wb_acct);
+}
+
 /*
  * Called on completion of a request. Note that it's also called when
  * a request is merged, when the request gets freed.
@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 	return limit;
 }
 
+struct wbt_wait_data {
+	struct wait_queue_entry wq;
+	struct task_struct *task;
+	struct rq_wb *rwb;
+	struct rq_wait *rqw;
+	unsigned long rw;
+	bool got_token;
+};
+
+static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
+			     int wake_flags, void *key)
+{
+	struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
+						  wq);
+
+	/*
+	 * If we fail to get a budget, return -1 to interrupt the wake up
+	 * loop in __wake_up_common.
+	 */
+	if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
+		return -1;
+
+	data->got_token = true;
+	list_del_init(&curr->entry);
+	wake_up_process(data->task);
+	return 1;
+}
+
 /*
  * Block if we will exceed our limit, or if we are currently waiting for
  * the timer to kick off queuing again.
@@ -491,31 +526,52 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 	__acquires(lock)
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
-	DECLARE_WAITQUEUE(wait, current);
+	struct wbt_wait_data data = {
+		.wq = {
+			.func	= wbt_wake_function,
+			.entry	= LIST_HEAD_INIT(data.wq.entry),
+		},
+		.task = current,
+		.rwb = rwb,
+		.rqw = rqw,
+		.rw = rw,
+	};
 	bool has_sleeper;
 
 	has_sleeper = wq_has_sleeper(&rqw->wait);
 	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 		return;
 
-	add_wait_queue_exclusive(&rqw->wait, &wait);
+	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
 	do {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-
-		if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+		if (data.got_token)
 			break;
 
+		if (!has_sleeper &&
+		    rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
+			finish_wait(&rqw->wait, &data.wq);
+
+			/*
+			 * We raced with wbt_wake_function() getting a token,
+			 * which means we now have two. Put our local token
+			 * and wake anyone else potentially waiting for one.
+			 */
+			if (data.got_token)
+				wbt_rqw_done(rwb, rqw, wb_acct);
+			break;
+		}
+
 		if (lock) {
 			spin_unlock_irq(lock);
 			io_schedule();
 			spin_lock_irq(lock);
 		} else
 			io_schedule();
 
 		has_sleeper = false;
 	} while (1);
 
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&rqw->wait, &wait);
+	finish_wait(&rqw->wait, &data.wq);
 }
 
 static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
 		return;
 	}
 
-	if (current_is_kswapd())
-		flags |= WBT_KSWAPD;
-	if (bio_op(bio) == REQ_OP_DISCARD)
-		flags |= WBT_DISCARD;
-
 	__wbt_wait(rwb, flags, bio->bi_opf, lock);
 
 	if (!blk_stat_is_active(rwb->cb))
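
The core of the new wake path is rq_wait_inc_below(): wbt_wake_function() atomically claims an inflight token for the woken task and only then removes it from the wait queue, so a wake_up_all() cannot overshoot the limit. A minimal user-space sketch of that primitive in C11 atomics (the kernel's version lives elsewhere in the block layer and differs in detail):

#include <stdatomic.h>
#include <stdbool.h>

/* Model only: atomically take one inflight slot, but only while the
 * current count is still below the limit. */
static bool rq_wait_inc_below(atomic_int *inflight, int limit)
{
	int cur = atomic_load_explicit(inflight, memory_order_relaxed);

	do {
		if (cur >= limit)
			return false;	/* over budget: hand out no token */
	} while (!atomic_compare_exchange_weak(inflight, &cur, cur + 1));

	return true;	/* token taken; the woken task owns an inflight slot */
}

If the compare-exchange loses a race it retries with the refreshed count, so a token is never granted once the limit is reached.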
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,7 @@ struct bsg_device {
 	struct request_queue *queue;
 	spinlock_t lock;
 	struct hlist_node dev_list;
-	atomic_t ref_count;
+	refcount_t ref_count;
 	char name[20];
 	int max_queue;
 };
@@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd)
 
 	mutex_lock(&bsg_mutex);
 
-	if (!atomic_dec_and_test(&bd->ref_count)) {
+	if (!refcount_dec_and_test(&bd->ref_count)) {
 		mutex_unlock(&bsg_mutex);
 		return 0;
 	}
@@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 
 	bd->queue = rq;
 
-	atomic_set(&bd->ref_count, 1);
+	refcount_set(&bd->ref_count, 1);
 	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
 	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
@@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 
 	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
 		if (bd->queue == q) {
-			atomic_inc(&bd->ref_count);
+			refcount_inc(&bd->ref_count);
 			goto found;
 		}
 	}
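
The bsg conversion is mechanical (atomic_set to refcount_set, atomic_inc to refcount_inc, atomic_dec_and_test to refcount_dec_and_test), but the win is semantic: refcount_t saturates instead of wrapping, so an overflowed or resurrected count becomes a warning and a leak rather than a use-after-free. A rough user-space model of that behaviour (the kernel's checks are more thorough):

#include <limits.h>
#include <stdio.h>

#define REFCOUNT_SATURATED INT_MIN	/* stand-in sentinel value */

struct ref_model { int refs; };

static void ref_inc(struct ref_model *r)
{
	if (r->refs == REFCOUNT_SATURATED)
		return;				/* stay pinned: leak, don't wrap */
	if (r->refs == INT_MAX || r->refs == 0) {
		r->refs = REFCOUNT_SATURATED;	/* overflow or inc-from-zero */
		fprintf(stderr, "refcount saturated\n");
		return;
	}
	r->refs++;
}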
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e)
 	spin_lock(&elv_list_lock);
 	if (elevator_find(e->elevator_name, e->uses_mq)) {
 		spin_unlock(&elv_list_lock);
-		if (e->icq_cache)
-			kmem_cache_destroy(e->icq_cache);
+		kmem_cache_destroy(e->icq_cache);
 		return -EBUSY;
 	}
 	list_add_tail(&e->list, &elv_list);
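
The dropped check was redundant because kmem_cache_destroy(), like kfree(), is specified to do nothing when handed a NULL pointer. That convention keeps teardown paths linear, as in this small kernel-style sketch (hypothetical caches, either of which may never have been created):

#include <linux/slab.h>

static void example_teardown(struct kmem_cache *a, struct kmem_cache *b)
{
	kmem_cache_destroy(a);	/* safe even if a == NULL */
	kmem_cache_destroy(b);	/* no per-pointer guards needed */
}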
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
 	.qc_issue	= ftide010_qc_issue,
 };
 
-static struct ata_port_info ftide010_port_info[] = {
-	{
-		.flags		= ATA_FLAG_SLAVE_POSS,
-		.mwdma_mask	= ATA_MWDMA2,
-		.udma_mask	= ATA_UDMA6,
-		.pio_mask	= ATA_PIO4,
-		.port_ops	= &pata_ftide010_port_ops,
-	},
+static struct ata_port_info ftide010_port_info = {
+	.flags		= ATA_FLAG_SLAVE_POSS,
+	.mwdma_mask	= ATA_MWDMA2,
+	.udma_mask	= ATA_UDMA6,
+	.pio_mask	= ATA_PIO4,
+	.port_ops	= &pata_ftide010_port_ops,
 };
 
 #if IS_ENABLED(CONFIG_SATA_GEMINI)
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
 }
 
 static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
 				     bool is_ata1)
 {
 	struct device *dev = ftide->dev;
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
 
 	/* Flag port as SATA-capable */
 	if (gemini_sata_bridge_enabled(sg, is_ata1))
-		ftide010_port_info[0].flags |= ATA_FLAG_SATA;
+		pi->flags |= ATA_FLAG_SATA;
+
+	/* This device has broken DMA, only PIO works */
+	if (of_machine_is_compatible("itian,sq201")) {
+		pi->mwdma_mask = 0;
+		pi->udma_mask = 0;
+	}
 
 	/*
 	 * We assume that a simple 40-wire cable is used in the PATA mode.
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
 }
 #else
 static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+				     struct ata_port_info *pi,
 				     bool is_ata1)
 {
 	return -ENOTSUPP;
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
-	const struct ata_port_info pi = ftide010_port_info[0];
+	struct ata_port_info pi = ftide010_port_info;
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	struct ftide010 *ftide;
 	struct resource *res;
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
 	 * are ATA0. This will also set up the cable types.
 	 */
 	ret = pata_ftide010_gemini_init(ftide,
+					&pi,
 					(res->start == 0x63400000));
 	if (ret)
 		goto err_dis_clk;
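
The enabling change here is structural: the port info used to be a file-scope array mutated in place, so a quirk applied during one probe would bleed into every later probe. Passing a stack copy through the init path makes the quirk per-device. The pattern in outline (a sketch, not the full driver):

#include <linux/libata.h>
#include <linux/of.h>

static const struct ata_port_info template = {
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.pio_mask	= ATA_PIO4,
};

static void apply_board_quirks(struct ata_port_info *pi)
{
	/* Each probe gets its own copy of "template" before this runs. */
	if (of_machine_is_compatible("itian,sq201")) {
		pi->mwdma_mask = 0;	/* DMA is broken on this board */
		pi->udma_mask = 0;	/* fall back to PIO */
	}
}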
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -83,6 +83,18 @@ module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
 MODULE_PARM_DESC(max_persistent_grants,
                  "Maximum number of grants to map persistently");
 
+/*
+ * How long a persistent grant is allowed to remain allocated without being in
+ * use. The time is in seconds, 0 means indefinitely long.
+ */
+
+static unsigned int xen_blkif_pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+		   uint, 0644);
+MODULE_PARM_DESC(persistent_grant_unused_seconds,
+		 "Time in seconds an unused persistent grant is allowed to "
+		 "remain allocated. Default is 60, 0 means unlimited.");
+
 /*
  * Maximum number of rings/queues blkback supports, allow as many queues as there
  * are CPUs if user has not specified a value.
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644);
 /* Number of free pages to remove on each call to gnttab_free_pages */
 #define NUM_BATCH_FREE_PAGES 10
 
+static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+{
+	return xen_blkif_pgrant_timeout &&
+	       (jiffies - persistent_gnt->last_used >=
+		HZ * xen_blkif_pgrant_timeout);
+}
+
 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
 {
 	unsigned long flags;
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
 		}
 	}
 
-	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
-	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+	persistent_gnt->active = true;
 	/* Add new node and rebalance tree. */
 	rb_link_node(&(persistent_gnt->node), parent, new);
 	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
 		else if (gref > data->gnt)
 			node = node->rb_right;
 		else {
-			if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+			if (data->active) {
 				pr_alert_ratelimited("requesting a grant already in use\n");
 				return NULL;
 			}
-			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+			data->active = true;
 			atomic_inc(&ring->persistent_gnt_in_use);
 			return data;
 		}
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
 static void put_persistent_gnt(struct xen_blkif_ring *ring,
                                struct persistent_gnt *persistent_gnt)
 {
-	if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+	if (!persistent_gnt->active)
 		pr_alert_ratelimited("freeing a grant already unused\n");
-	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
-	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+	persistent_gnt->last_used = jiffies;
+	persistent_gnt->active = false;
 	atomic_dec(&ring->persistent_gnt_in_use);
 }
 
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
 	struct persistent_gnt *persistent_gnt;
 	struct rb_node *n;
 	unsigned int num_clean, total;
-	bool scan_used = false, clean_used = false;
+	bool scan_used = false;
 	struct rb_root *root;
 
-	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
-	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
-	    !ring->blkif->vbd.overflow_max_grants)) {
-		goto out;
-	}
-
 	if (work_busy(&ring->persistent_purge_work)) {
 		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
 		goto out;
 	}
 
-	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
-	num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
-	num_clean = min(ring->persistent_gnt_c, num_clean);
-	if ((num_clean == 0) ||
-	    (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
-		goto out;
+	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
+	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+	    !ring->blkif->vbd.overflow_max_grants)) {
+		num_clean = 0;
+	} else {
+		num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
+		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
+			    num_clean;
+		num_clean = min(ring->persistent_gnt_c, num_clean);
+		pr_debug("Going to purge at least %u persistent grants\n",
+			 num_clean);
+	}
 
 	/*
 	 * At this point, we can assure that there will be no calls
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
 	 * number of grants.
 	 */
 
-	total = num_clean;
+	total = 0;
 
-	pr_debug("Going to purge %u persistent grants\n", num_clean);
-
 	BUG_ON(!list_empty(&ring->persistent_purge_list));
 	root = &ring->persistent_gnts;
@@ -412,47 +428,38 @@ purge_list:
 		BUG_ON(persistent_gnt->handle ==
 			BLKBACK_INVALID_HANDLE);
 
-		if (clean_used) {
-			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+		if (persistent_gnt->active)
 			continue;
-		}
-
-		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+		if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
 			continue;
-		if (!scan_used &&
-		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
+		if (scan_used && total >= num_clean)
 			continue;
 
 		rb_erase(&persistent_gnt->node, root);
 		list_add(&persistent_gnt->remove_node,
 			 &ring->persistent_purge_list);
-		if (--num_clean == 0)
-			goto finished;
+		total++;
 	}
 	/*
-	 * If we get here it means we also need to start cleaning
+	 * Check whether we also need to start cleaning
 	 * grants that were used since last purge in order to cope
 	 * with the requested num
 	 */
-	if (!scan_used && !clean_used) {
-		pr_debug("Still missing %u purged frames\n", num_clean);
+	if (!scan_used && total < num_clean) {
+		pr_debug("Still missing %u purged frames\n", num_clean - total);
 		scan_used = true;
 		goto purge_list;
 	}
-finished:
-	if (!clean_used) {
-		pr_debug("Finished scanning for grants to clean, removing used flag\n");
-		clean_used = true;
-		goto purge_list;
-	}
 
-	ring->persistent_gnt_c -= (total - num_clean);
-	ring->blkif->vbd.overflow_max_grants = 0;
-
-	/* We can defer this work */
-	schedule_work(&ring->persistent_purge_work);
-	pr_debug("Purged %u/%u\n", (total - num_clean), total);
+	if (total) {
+		ring->persistent_gnt_c -= total;
+		ring->blkif->vbd.overflow_max_grants = 0;
+
+		/* We can defer this work */
+		schedule_work(&ring->persistent_purge_work);
+		pr_debug("Purged %u/%u\n", num_clean, total);
+	}
 
 out:
 	return;
 }
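
The expiry test is ordinary jiffies arithmetic: both operands are unsigned long, so the subtraction stays correct across counter wraparound, and a timeout of 0 short-circuits the check so existing setups see no change. A self-contained model of the predicate:

#include <stdbool.h>

/* User-space model of persistent_gnt_timeout(); HZ and jiffies stand in
 * for the kernel's tick rate and tick counter. */
static unsigned long jiffies;
enum { HZ = 250 };
static unsigned int pgrant_timeout = 60;	/* seconds; 0 disables expiry */

static bool grant_timed_out(unsigned long last_used)
{
	return pgrant_timeout &&
	       (jiffies - last_used >= (unsigned long)HZ * pgrant_timeout);
}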
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -233,16 +233,6 @@ struct xen_vbd {
 
 struct backend_info;
 
-/* Number of available flags */
-#define PERSISTENT_GNT_FLAGS_SIZE	2
-/* This persistent grant is currently in use */
-#define PERSISTENT_GNT_ACTIVE		0
-/*
- * This persistent grant has been used, this flag is set when we remove the
- * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
- */
-#define PERSISTENT_GNT_WAS_ACTIVE	1
-
 /* Number of requests that we can fit in a ring */
 #define XEN_BLKIF_REQS_PER_PAGE 32
 
@@ -250,7 +240,8 @@ struct persistent_gnt {
 	struct page *page;
 	grant_ref_t gnt;
 	grant_handle_t handle;
-	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
+	unsigned long last_used;
+	bool active;
 	struct rb_node node;
 	struct list_head remove_node;
 };
@@ -278,7 +269,6 @@ struct xen_blkif_ring {
 	wait_queue_head_t pending_free_wq;
 
 	/* Tree to store persistent grants. */
-	spinlock_t pers_gnts_lock;
 	struct rb_root persistent_gnts;
 	unsigned int persistent_gnt_c;
 	atomic_t persistent_gnt_in_use;
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -46,6 +46,7 @@
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
 #include <linux/list.h>
+#include <linux/workqueue.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq)
 
 static DEFINE_MUTEX(blkfront_mutex);
 static const struct block_device_operations xlvbd_block_fops;
+static struct delayed_work blkfront_work;
+static LIST_HEAD(info_list);
 
 /*
  * Maximum number of segments in indirect requests, the actual value used by
@@ -216,6 +219,7 @@ struct blkfront_info
 	/* Save uncomplete reqs and bios for migration. */
 	struct list_head requests;
 	struct bio_list bio_list;
+	struct list_head info_list;
 };
 
 static unsigned int nr_minors;
@@ -1759,6 +1763,12 @@ abort_transaction:
 	return err;
 }
 
+static void free_info(struct blkfront_info *info)
+{
+	list_del(&info->info_list);
+	kfree(info);
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_blkback(struct xenbus_device *dev,
 			   struct blkfront_info *info)
@@ -1880,7 +1890,10 @@ again:
 destroy_blkring:
 	blkif_free(info, 0);
 
-	kfree(info);
+	mutex_lock(&blkfront_mutex);
+	free_info(info);
+	mutex_unlock(&blkfront_mutex);
+
 	dev_set_drvdata(&dev->dev, NULL);
 
 	return err;
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev,
 	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
 	dev_set_drvdata(&dev->dev, info);
 
+	mutex_lock(&blkfront_mutex);
+	list_add(&info->info_list, &info_list);
+	mutex_unlock(&blkfront_mutex);
+
 	return 0;
 }
 
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
 		indirect_segments = 0;
 	info->max_indirect_segments = indirect_segments;
+
+	if (info->feature_persistent) {
+		mutex_lock(&blkfront_mutex);
+		schedule_delayed_work(&blkfront_work, HZ * 10);
+		mutex_unlock(&blkfront_mutex);
+	}
 }
 
 /*
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
 	mutex_unlock(&info->mutex);
 
 	if (!bdev) {
-		kfree(info);
+		mutex_lock(&blkfront_mutex);
+		free_info(info);
+		mutex_unlock(&blkfront_mutex);
 		return 0;
 	}
 
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
 	if (info && !bdev->bd_openers) {
 		xlvbd_release_gendisk(info);
 		disk->private_data = NULL;
-		kfree(info);
+		mutex_lock(&blkfront_mutex);
+		free_info(info);
+		mutex_unlock(&blkfront_mutex);
 	}
 
 	mutex_unlock(&bdev->bd_mutex);
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
 		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
 		xlvbd_release_gendisk(info);
 		disk->private_data = NULL;
-		kfree(info);
+		free_info(info);
 	}
 
 out:
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = {
 	.is_ready = blkfront_is_ready,
 };
 
+static void purge_persistent_grants(struct blkfront_info *info)
+{
+	unsigned int i;
+	unsigned long flags;
+
+	for (i = 0; i < info->nr_rings; i++) {
+		struct blkfront_ring_info *rinfo = &info->rinfo[i];
+		struct grant *gnt_list_entry, *tmp;
+
+		spin_lock_irqsave(&rinfo->ring_lock, flags);
+
+		if (rinfo->persistent_gnts_c == 0) {
+			spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+			continue;
+		}
+
+		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
+					 node) {
+			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
+			    gnttab_query_foreign_access(gnt_list_entry->gref))
+				continue;
+
+			list_del(&gnt_list_entry->node);
+			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
+			rinfo->persistent_gnts_c--;
+			__free_page(gnt_list_entry->page);
+			kfree(gnt_list_entry);
+		}
+
+		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+	}
+}
+
+static void blkfront_delay_work(struct work_struct *work)
+{
+	struct blkfront_info *info;
+	bool need_schedule_work = false;
+
+	mutex_lock(&blkfront_mutex);
+
+	list_for_each_entry(info, &info_list, info_list) {
+		if (info->feature_persistent) {
+			need_schedule_work = true;
+			mutex_lock(&info->mutex);
+			purge_persistent_grants(info);
+			mutex_unlock(&info->mutex);
+		}
+	}
+
+	if (need_schedule_work)
+		schedule_delayed_work(&blkfront_work, HZ * 10);
+
+	mutex_unlock(&blkfront_mutex);
+}
+
 static int __init xlblk_init(void)
 {
 	int ret;
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	if (!xen_has_pv_disk_devices())
+		return -ENODEV;
+
+	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+		pr_warn("xen_blk: can't get major %d with name %s\n",
+			XENVBD_MAJOR, DEV_NAME);
+		return -ENODEV;
+	}
+
 	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
 		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void)
 		xen_blkif_max_queues = nr_cpus;
 	}
 
-	if (!xen_has_pv_disk_devices())
-		return -ENODEV;
-
-	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
-		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
-		       XENVBD_MAJOR, DEV_NAME);
-		return -ENODEV;
-	}
+	INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
 
 	ret = xenbus_register_frontend(&blkfront_driver);
 	if (ret) {
@@ -2663,6 +2747,8 @@ module_init(xlblk_init);
 
 static void __exit xlblk_exit(void)
 {
+	cancel_delayed_work_sync(&blkfront_work);
+
 	xenbus_unregister_driver(&blkfront_driver);
 	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
 	kfree(minors);
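
On the frontend, all the cleanup hangs off a single module-wide delayed work item: it re-arms itself every ten seconds while any device still uses persistent grants, and is cancelled synchronously at module exit so it cannot run against freed state. The lifecycle reduced to a skeleton (kernel workqueue API; scan_devices() is a hypothetical stand-in for the per-device purge loop):

#include <linux/workqueue.h>

static struct delayed_work scan_work;
static bool scan_devices(void);		/* hypothetical: purge stale grants */

static void scan_fn(struct work_struct *work)
{
	if (scan_devices())
		schedule_delayed_work(&scan_work, HZ * 10);	/* re-arm */
}

static int __init example_init(void)
{
	INIT_DELAYED_WORK(&scan_work, scan_fn);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&scan_work);	/* waits out a running scan */
}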
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
 	if (!CDROM_CAN(CDC_SELECT_DISC) ||
 	    (arg == CDSL_CURRENT || arg == CDSL_NONE))
 		return cdi->ops->drive_status(cdi, CDSL_CURRENT);
-	if (((int)arg >= cdi->capacity))
+	if (arg >= cdi->capacity)
 		return -EINVAL;
 	return cdrom_slot_status(cdi, arg);
 }
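
The one-line cdrom fix closes a signed/unsigned hole: arg arrives from userspace as an unsigned long, and casting it to int before the bound check lets values with the high bit set appear negative, slip past the "too large" test, and index beyond cdi->capacity (the info leak / OOB read). A compilable demonstration of the two comparisons (two's-complement behaviour assumed for the cast):

#include <stdio.h>

static int capacity = 4;	/* plays the role of cdi->capacity */

static int check_old(unsigned long arg)
{
	if ((int)arg >= capacity)	/* (int)0x80000000 is negative: passes */
		return -1;
	return 0;			/* "in range": would be used as an index */
}

static int check_new(unsigned long arg)
{
	if (arg >= (unsigned long)capacity)	/* unsigned compare rejects it */
		return -1;
	return 0;
}

int main(void)
{
	unsigned long evil = 0x80000000UL;

	printf("old: %d  new: %d\n", check_old(evil), check_new(evil));
	return 0;
}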
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
 		old_value = *dbbuf_db;
 		*dbbuf_db = value;
 
+		/*
+		 * Ensure that the doorbell is updated before reading the event
+		 * index from memory. The controller needs to provide similar
+		 * ordering to ensure the event index is updated before reading
+		 * the doorbell.
+		 */
+		mb();
+
 		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
 			return false;
 	}
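
The added mb() enforces store-then-load ordering: the shadow doorbell write must be visible before the event index is read, with the controller doing the mirror image on its side; without a full barrier each party can read the other's stale value and a doorbell update is lost (the classic store-buffer litmus test). A user-space C11 analogue of the simplified pattern (the real need-event predicate is more involved):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint dbbuf_db;	/* shadow doorbell, written by the driver */
static atomic_uint dbbuf_ei;	/* event index, written by the device side */

static bool update_and_check(unsigned int value)
{
	atomic_store_explicit(&dbbuf_db, value, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added mb() */
	/* Stand-in check: read the event index only after the store above. */
	return atomic_load_explicit(&dbbuf_ei, memory_order_relaxed) == value;
}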
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1210,7 +1210,7 @@ static int __init nvmet_init(void)
 
 	error = nvmet_init_discovery();
 	if (error)
-		goto out;
+		goto out_free_work_queue;
 
 	error = nvmet_init_configfs();
 	if (error)
@@ -1219,6 +1219,8 @@ static int __init nvmet_init(void)
 
 out_exit_discovery:
 	nvmet_exit_discovery();
+out_free_work_queue:
+	destroy_workqueue(buffered_io_wq);
 out:
 	return error;
 }
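
The nvmet fix is the standard error-unwind rule: every resource acquired before a failing step needs its own label, unwound in reverse order of acquisition; here the buffered-IO workqueue was created first, but the discovery failure path jumped straight to out and leaked it. The shape of the corrected ladder (hypothetical names):

static struct workqueue_struct *wq;

static int __init example_init(void)
{
	int error;

	wq = alloc_workqueue("example_wq", 0, 0);
	if (!wq)
		return -ENOMEM;

	error = init_discovery();		/* hypothetical step */
	if (error)
		goto out_free_work_queue;	/* was "goto out": wq leaked */

	error = init_configfs();		/* hypothetical step */
	if (error)
		goto out_exit_discovery;

	return 0;

out_exit_discovery:
	exit_discovery();
out_free_work_queue:
	destroy_workqueue(wq);
	return error;
}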
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work)
 	struct fcloop_tport *tport = tls_req->tport;
 	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
 
-	if (tport->remoteport)
+	if (!tport || tport->remoteport)
 		lsreq->done(lsreq, tls_req->status);
 }
 
@@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
 
 	if (!rport->targetport) {
 		tls_req->status = -ECONNREFUSED;
+		tls_req->tport = NULL;
 		schedule_work(&tls_req->work);
 		return ret;
 	}