Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A small set of fixes that should go into this release. This contains:

   - An NVMe pull request from Christoph, with a few select fixes. One
     of them fixes a polling regression in this series, in which it's
     trivial to cause the kernel to disable most of the hardware queue
     interrupts.

   - Fixup for a blk-mq queue usage imbalance on request allocation,
     from Keith.

   - A xen block pull request from Konrad, fixing two issues with
     xen/xen-blkfront"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq-pci: add a fallback when pci_irq_get_affinity returns NULL
  nvme-pci: set cqe_seen on polled completions
  nvme-fabrics: fix reporting of unrecognized options
  nvmet-fc: eliminate incorrect static markers on local variables
  nvmet-fc: correct use after free on list teardown
  nvmet: don't overwrite identify sn/fr with 0-bytes
  xen-blkfront: use a right index when checking requests
  xen: fix bio vec merging
  blk-mq: Fix queue usage on failed request allocation
commit 70bfc741f8
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
 	for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		mask = pci_irq_get_affinity(pdev, queue);
 		if (!mask)
-			return -EINVAL;
+			goto fallback;
 
 		for_each_cpu(cpu, mask)
 			set->mq_map[cpu] = queue;
 	}
 
 	return 0;
+
+fallback:
+	WARN_ON_ONCE(set->nr_hw_queues > 1);
+	for_each_possible_cpu(cpu)
+		set->mq_map[cpu] = 0;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
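The new policy is easy to model outside the kernel: when no affinity mask can be obtained for a vector (as happens when the device ends up on a single legacy interrupt), every possible CPU is mapped to hardware queue 0 instead of failing the whole mapping. A minimal userspace sketch, with fake_irq_affinity(), map_queues() and NR_CPUS as invented stand-ins for the kernel APIs:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 8

/* Stand-in for pci_irq_get_affinity(); returns NULL like the legacy
 * (non-MSI-X) interrupt case that used to produce -EINVAL. */
static const int *fake_irq_affinity(int queue)
{
	(void)queue;
	return NULL;
}

static int map_queues(int nr_hw_queues, int mq_map[NR_CPUS])
{
	for (int queue = 0; queue < nr_hw_queues; queue++) {
		const int *mask = fake_irq_affinity(queue);

		if (!mask)
			goto fallback;
		/* real code: for_each_cpu(cpu, mask) mq_map[cpu] = queue; */
	}
	return 0;

fallback:
	/* No affinity info: route every CPU to queue 0 rather than fail. */
	assert(nr_hw_queues <= 1);	/* WARN_ON_ONCE() in the kernel */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		mq_map[cpu] = 0;
	return 0;
}

int main(void)
{
	int map[NR_CPUS];

	map_queues(1, map);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> queue %d\n", cpu, map[cpu]);
	return 0;
}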
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -360,12 +360,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 		return ERR_PTR(ret);
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	blk_queue_exit(q);
 
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
 	blk_mq_put_ctx(alloc_data.ctx);
-	blk_queue_exit(q);
 
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
@@ -411,12 +411,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+	blk_queue_exit(q);
 
 	if (!rq)
 		return ERR_PTR(-EWOULDBLOCK);
 
-	blk_queue_exit(q);
-
 	return rq;
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
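Both hunks fix the same pattern: blk_queue_enter() is taken before the allocation attempt, but the matching blk_queue_exit() only ran on the success path, so every failed allocation leaked a queue usage reference. A toy userspace model of the imbalance, with enter()/exit_q() and a plain counter standing in for the percpu q_usage_counter:

#include <stdio.h>

static int usage;

static void enter(void)  { usage++; }
static void exit_q(void) { usage--; }

static void *get_request(int fail) { return fail ? NULL : "rq"; }

/* Before the fix: the failure path returns without exit_q(). */
static void *alloc_buggy(int fail)
{
	enter();
	void *rq = get_request(fail);
	if (!rq)
		return NULL;	/* missing exit_q() here */
	exit_q();
	return rq;
}

/* After the fix: drop the reference right after the allocation
 * attempt, so success and failure paths are balanced. */
static void *alloc_fixed(int fail)
{
	enter();
	void *rq = get_request(fail);
	exit_q();
	return rq;	/* NULL on failure, balanced either way */
}

int main(void)
{
	alloc_buggy(1);
	printf("buggy: usage = %d (leaked)\n", usage);
	usage = 0;
	alloc_fixed(1);
	printf("fixed: usage = %d\n", usage);
	return 0;
}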
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2075,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev)
 			/*
 			 * Get the bios in the request so we can re-queue them.
 			 */
-			if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
-			    req_op(shadow[i].request) == REQ_OP_DISCARD ||
-			    req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+			if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+			    req_op(shadow[j].request) == REQ_OP_DISCARD ||
+			    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
 			    shadow[j].request->cmd_flags & REQ_FUA) {
 				/*
 				 * Flush operations don't contain bios, so
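The bug is a classic compaction-loop index mix-up: entries are scanned from the old array with i but written to the new array with j, and the inspection that follows the copy must use the entry just written at index j. A contrived sketch of the pattern (the struct fields are invented, not the blkfront shadow layout):

#include <stdio.h>

struct req {
	int in_flight;
	int op;
};

int main(void)
{
	struct req src[4] = { {0, 1}, {1, 2}, {0, 3}, {1, 4} };
	struct req dst[4];
	int j = 0;

	for (int i = 0; i < 4; i++) {
		if (!src[i].in_flight)
			continue;
		dst[j] = src[i];
		/* check the copied entry via j; an src-style index i
		 * would inspect an unrelated slot once the arrays
		 * diverge */
		printf("requeue op %d\n", dst[j].op);
		j++;
	}
	return 0;
}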
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -794,7 +794,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
-		if (opt_tokens[i].token & ~allowed_opts) {
+		if ((opt_tokens[i].token & opts->mask) &&
+		    (opt_tokens[i].token & ~allowed_opts)) {
 			pr_warn("invalid parameter '%s'\n",
 				opt_tokens[i].pattern);
 		}
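The old test warned about every token outside allowed_opts, including options the user never supplied. The fixed test additionally requires the token to be present in opts->mask, i.e. actually given. A standalone model with made-up option bits:

#include <stdio.h>

#define OPT_A (1u << 0)
#define OPT_B (1u << 1)
#define OPT_C (1u << 2)

int main(void)
{
	unsigned user_mask = OPT_A | OPT_C;	/* options supplied */
	unsigned allowed   = OPT_A | OPT_B;	/* options supported */
	unsigned tokens[]  = { OPT_A, OPT_B, OPT_C };

	for (unsigned i = 0; i < sizeof(tokens) / sizeof(tokens[0]); i++) {
		/* old check: (tokens[i] & ~allowed) alone would also
		 * flag OPT_B, which was never passed in */
		if ((tokens[i] & user_mask) && (tokens[i] & ~allowed))
			printf("invalid parameter 0x%x\n", tokens[i]);
	}
	return 0;	/* prints only 0x4 (OPT_C) */
}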
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -801,6 +801,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 		return;
 	}
 
+	nvmeq->cqe_seen = 1;
 	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
 	nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -830,10 +831,8 @@ static void nvme_process_cq(struct nvme_queue *nvmeq)
 		consumed++;
 	}
 
-	if (consumed) {
+	if (consumed)
 		nvme_ring_cq_doorbell(nvmeq);
-		nvmeq->cqe_seen = 1;
-	}
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
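cqe_seen is what nvme_irq() consults to decide between IRQ_HANDLED and IRQ_NONE. Setting it only from the interrupt-driven path meant that when polling consumed the completions first, the racing hard interrupt found nothing, reported IRQ_NONE, and enough of those trips the kernel's spurious-interrupt detector, which then disables the vector. A toy model of that accounting (the threshold of 2 is invented; the real detector counts unhandled interrupts over a much larger window):

#include <stdbool.h>
#include <stdio.h>

static bool cqe_seen;
static int spurious;

/* Polled path: consumes CQEs and, after the fix, records that fact. */
static void poll_completions(void)
{
	cqe_seen = true;
}

static void irq_handler(void)
{
	if (cqe_seen) {
		cqe_seen = false;	/* IRQ_HANDLED */
	} else if (++spurious > 2) {
		printf("irq disabled as spurious\n");
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		poll_completions();	/* poller wins the race */
		irq_handler();		/* still reports "handled" */
	}
	printf("spurious count: %d\n", spurious);	/* 0 */
	return 0;
}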
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,12 +199,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
 	copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
 
-	memset(id->mn, ' ', sizeof(id->mn));
-	strncpy((char *)id->mn, "Linux", sizeof(id->mn));
-
-	memset(id->fr, ' ', sizeof(id->fr));
-	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
-
 	id->rab = 6;
 
 	/*
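The removed lines undid the space padding that the preceding copy_and_pad() calls had just set up: strncpy() pads its destination with 0-bytes, while NVMe ASCII identify fields must be space-padded and unterminated. A sketch of the padding rule, with copy_and_pad reimplemented locally to match the behavior the target code relies on:

#include <stdio.h>
#include <string.h>

/* Copy up to the field width, pad the remainder with ' ', and never
 * NUL-terminate; this mirrors the semantics the hunk depends on. */
static void copy_and_pad(char *dst, size_t dlen, const char *src, size_t slen)
{
	if (slen > dlen)
		slen = dlen;
	memcpy(dst, src, slen);
	memset(dst + slen, ' ', dlen - slen);
}

int main(void)
{
	char fr[8];

	copy_and_pad(fr, sizeof(fr), "4.13", 4);
	/* 0x20 (space) padding, where strncpy would have left 0x00 */
	for (size_t i = 0; i < sizeof(fr); i++)
		printf("%02x ", (unsigned char)fr[i]);
	printf("\n");
	return 0;
}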
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -394,7 +394,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
 static struct nvmet_fc_ls_iod *
 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
 {
-	static struct nvmet_fc_ls_iod *iod;
+	struct nvmet_fc_ls_iod *iod;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
@@ -471,7 +471,7 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
 static struct nvmet_fc_fcp_iod *
 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 {
-	static struct nvmet_fc_fcp_iod *fod;
+	struct nvmet_fc_fcp_iod *fod;
 
 	lockdep_assert_held(&queue->qlock);
 
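A `static` qualifier on a function-local pointer turns it into a single instance shared by every caller, so two contexts allocating concurrently can clobber each other's result before it is used. A contrived pthread sketch of that sharing (the allocation is trivialized and this is not the nvmet-fc code path; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static int storage[2];

static int *alloc_broken(long id)
{
	static int *slot;	/* one shared slot for ALL callers */

	slot = &storage[id];	/* another thread may overwrite slot
				 * between this store and the return */
	return slot;
}

static void *worker(void *arg)
{
	long id = (long)arg;

	printf("thread %ld got %p (expected %p)\n",
	       id, (void *)alloc_broken(id), (void *)&storage[id]);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)0L);
	pthread_create(&b, NULL, worker, (void *)1L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}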
@@ -704,7 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 {
 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
 	struct nvmet_fc_fcp_iod *fod = queue->fod;
-	struct nvmet_fc_defer_fcp_req *deferfcp;
+	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
 	unsigned long flags;
 	int i, writedataactive;
 	bool disconnect;
@@ -735,7 +735,8 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 	}
 
 	/* Cleanup defer'ed IOs in queue */
-	list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
+			req_list) {
 		list_del(&deferfcp->req_list);
 		kfree(deferfcp);
 	}
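list_for_each_entry() advances by reading the current node's next pointer after the loop body runs, so freeing the node inside the body is a use-after-free; the _safe variant caches the successor (tempptr above) first. The same idea on a hand-rolled singly linked list, since list_head isn't available in userspace:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int v;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL;

	/* build a three-element list: 1 -> 2 -> 3 */
	for (int i = 3; i > 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->v = i;
		n->next = head;
		head = n;
	}

	/* "safe" traversal: cache next before freeing the current
	 * node, exactly what the extra cursor in the macro provides */
	for (struct node *cur = head, *tmp; cur; cur = tmp) {
		tmp = cur->next;	/* grabbed before the free */
		printf("freeing %d\n", cur->v);
		free(cur);		/* cur->next is now invalid */
	}
	return 0;
}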
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
 	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
 
-	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-		((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+	return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
 #else
 	/*
 	 * XXX: Add support for merging bio_vec when using different page
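The old condition declared the two vecs mergeable whenever their bus frames were identical or adjacent, even if vec1 did not actually end on a frame boundary. The new expression computes the bus frame immediately after vec1's last byte and requires it to equal vec2's frame. The arithmetic in isolation, with a 4 KiB frame and made-up frame numbers:

#include <stdbool.h>
#include <stdio.h>

#define FRAME_SHIFT 12			/* 4 KiB frames */
#define PFN_DOWN(x) ((x) >> FRAME_SHIFT)

struct vec {
	unsigned long bfn;		/* bus frame of first byte */
	unsigned int offset, len;
};

static bool mergeable(const struct vec *v1, const struct vec *v2)
{
	return v1->bfn + PFN_DOWN(v1->offset + v1->len) == v2->bfn;
}

int main(void)
{
	struct vec full = { 100, 0, 4096 };	/* fills its frame */
	struct vec half = { 100, 0, 2048 };	/* stops mid-frame */
	struct vec next = { 101, 0,  512 };

	printf("full frame: %d\n", mergeable(&full, &next));	/* 1 */
	/* the old bfn1+1 == bfn2 test would wrongly accept this */
	printf("mid frame:  %d\n", mergeable(&half, &next));	/* 0 */
	return 0;
}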