nvme-pci: remove nvme_queue from nvme_iod
We can get the nvme_queue from the request just as easily, so remove the
duplicate path to the same structure to save some space.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
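For context, a minimal sketch (not part of this patch; the helper name is hypothetical) of the lookup the patch open-codes at each call site: blk-mq keeps the driver's per-hardware-queue pointer in hctx->driver_data, and nvme-pci points that at the owning nvme_queue, so any request can recover its queue through req->mq_hctx without caching a second pointer in the per-command iod.

/*
 * Illustrative only, assuming the drivers/nvme/host/pci.c context: the patch
 * open-codes req->mq_hctx->driver_data at each call site rather than adding
 * a helper like this.
 */
static inline struct nvme_queue *nvme_req_to_nvmeq(struct request *req)
{
        return req->mq_hctx->driver_data;
}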
commit a53232cb3a
parent 4cde03d82e
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -226,7 +226,6 @@ struct nvme_queue {
 struct nvme_iod {
         struct nvme_request req;
         struct nvme_command cmd;
-        struct nvme_queue *nvmeq;
         bool use_sgl;
         int aborted;
         int npages;        /* In the PRP list. 0 means small pool in use */
@@ -430,11 +429,6 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set,
 {
         struct nvme_dev *dev = set->driver_data;
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-        int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
-        struct nvme_queue *nvmeq = &dev->queues[queue_idx];
-
-        BUG_ON(!nvmeq);
-        iod->nvmeq = nvmeq;
 
         nvme_req(req)->ctrl = &dev->ctrl;
         nvme_req(req)->cmd = &iod->cmd;
@@ -526,7 +520,7 @@ static void **nvme_pci_iod_list(struct request *req)
 
 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 {
-        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
         int nseg = blk_rq_nr_phys_segments(req);
         unsigned int avg_seg_size;
 
@@ -534,7 +528,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
 
         if (!nvme_ctrl_sgl_supported(&dev->ctrl))
                 return false;
-        if (!iod->nvmeq->qid)
+        if (!nvmeq->qid)
                 return false;
         if (!sgl_threshold || avg_seg_size < sgl_threshold)
                 return false;
@@ -831,6 +825,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
         int rc;
 
         if (blk_rq_nr_phys_segments(req) == 1) {
+                struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
                 struct bio_vec bv = req_bvec(req);
 
                 if (!is_pci_p2pdma_page(bv.bv_page)) {
@@ -838,7 +833,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                                 return nvme_setup_prp_simple(dev, req,
                                                              &cmnd->rw, &bv);
 
-                        if (iod->nvmeq->qid && sgl_threshold &&
+                        if (nvmeq->qid && sgl_threshold &&
                             nvme_ctrl_sgl_supported(&dev->ctrl))
                                 return nvme_setup_sgl_simple(dev, req,
                                                              &cmnd->rw, &bv);
@@ -1017,12 +1012,16 @@ static void nvme_queue_rqs(struct request **rqlist)
 
 static __always_inline void nvme_pci_unmap_rq(struct request *req)
 {
-        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-        struct nvme_dev *dev = iod->nvmeq->dev;
+        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+        struct nvme_dev *dev = nvmeq->dev;
 
-        if (blk_integrity_rq(req))
+        if (blk_integrity_rq(req)) {
+                struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
                 dma_unmap_page(dev->dev, iod->meta_dma,
                                rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+        }
+
         if (blk_rq_nr_phys_segments(req))
                 nvme_unmap_data(dev, req);
 }
@@ -1270,8 +1269,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 
 static void abort_endio(struct request *req, blk_status_t error)
 {
-        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-        struct nvme_queue *nvmeq = iod->nvmeq;
+        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 
         dev_warn(nvmeq->dev->ctrl.device,
                  "Abort status: 0x%x", nvme_req(req)->status);
@@ -1333,7 +1331,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
 static enum blk_eh_timer_return nvme_timeout(struct request *req)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-        struct nvme_queue *nvmeq = iod->nvmeq;
+        struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
         struct nvme_dev *dev = nvmeq->dev;
         struct request *abort_req;
         struct nvme_command cmd = { };
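Why the new lookup is always valid (a simplified sketch paraphrased from the driver's existing hctx setup, not part of this diff; details may differ): the blk-mq ->init_hctx callbacks in nvme-pci store the nvme_queue pointer as the hardware context's driver data when the tag sets are initialized, so req->mq_hctx->driver_data resolves to the same queue the removed iod->nvmeq field used to cache.

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int hctx_idx)
{
        /*
         * Sketch of the existing I/O-queue callback: data is the tag set's
         * driver_data (the nvme_dev, as seen in nvme_pci_init_request above);
         * queue 0 is the admin queue, so I/O hctx index N maps to
         * dev->queues[N + 1].
         */
        struct nvme_dev *dev = data;
        struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

        hctx->driver_data = nvmeq;
        return 0;
}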