NVMe: Unify SQ entry writing and doorbell ringing

This patch changes sq_cmd writers to instead create their command on
the stack. __nvme_submit_cmd copies the sq entry to the queue and writes
the doorbell.

Signed-off-by: Jon Derrick <jonathan.derrick@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
Jon Derrick 2015-07-20 10:14:08 -06:00 committed by Jens Axboe
parent 0034af0365
commit 498c43949c
1 changed file with 35 additions and 45 deletions

View File

@ -730,18 +730,16 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req, static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
struct nvme_iod *iod) struct nvme_iod *iod)
{ {
struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; struct nvme_command cmnd;
memcpy(cmnd, req->cmd, sizeof(struct nvme_command)); memcpy(&cmnd, req->cmd, sizeof(cmnd));
cmnd->rw.command_id = req->tag; cmnd.rw.command_id = req->tag;
if (req->nr_phys_segments) { if (req->nr_phys_segments) {
cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
} }
if (++nvmeq->sq_tail == nvmeq->q_depth) __nvme_submit_cmd(nvmeq, &cmnd);
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
} }
/* /*
@ -754,45 +752,41 @@ static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
{ {
struct nvme_dsm_range *range = struct nvme_dsm_range *range =
(struct nvme_dsm_range *)iod_list(iod)[0]; (struct nvme_dsm_range *)iod_list(iod)[0];
struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; struct nvme_command cmnd;
range->cattr = cpu_to_le32(0); range->cattr = cpu_to_le32(0);
range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift); range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
memset(cmnd, 0, sizeof(*cmnd)); memset(&cmnd, 0, sizeof(cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm; cmnd.dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.command_id = req->tag; cmnd.dsm.command_id = req->tag;
cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma); cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
cmnd->dsm.nr = 0; cmnd.dsm.nr = 0;
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
if (++nvmeq->sq_tail == nvmeq->q_depth) __nvme_submit_cmd(nvmeq, &cmnd);
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
} }
static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
int cmdid) int cmdid)
{ {
struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; struct nvme_command cmnd;
memset(cmnd, 0, sizeof(*cmnd)); memset(&cmnd, 0, sizeof(cmnd));
cmnd->common.opcode = nvme_cmd_flush; cmnd.common.opcode = nvme_cmd_flush;
cmnd->common.command_id = cmdid; cmnd.common.command_id = cmdid;
cmnd->common.nsid = cpu_to_le32(ns->ns_id); cmnd.common.nsid = cpu_to_le32(ns->ns_id);
if (++nvmeq->sq_tail == nvmeq->q_depth) __nvme_submit_cmd(nvmeq, &cmnd);
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
} }
static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
struct nvme_ns *ns) struct nvme_ns *ns)
{ {
struct request *req = iod_get_private(iod); struct request *req = iod_get_private(iod);
struct nvme_command *cmnd; struct nvme_command cmnd;
u16 control = 0; u16 control = 0;
u32 dsmgmt = 0; u32 dsmgmt = 0;
@ -804,19 +798,17 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
if (req->cmd_flags & REQ_RAHEAD) if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; memset(&cmnd, 0, sizeof(cmnd));
memset(cmnd, 0, sizeof(*cmnd)); cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
cmnd.rw.command_id = req->tag;
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read); cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
cmnd->rw.command_id = req->tag; cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->rw.nsid = cpu_to_le32(ns->ns_id); cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
if (blk_integrity_rq(req)) { if (blk_integrity_rq(req)) {
cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg)); cmnd.rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
switch (ns->pi_type) { switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3: case NVME_NS_DPS_PI_TYPE3:
control |= NVME_RW_PRINFO_PRCHK_GUARD; control |= NVME_RW_PRINFO_PRCHK_GUARD;
@ -825,19 +817,17 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
case NVME_NS_DPS_PI_TYPE2: case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD | control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF; NVME_RW_PRINFO_PRCHK_REF;
cmnd->rw.reftag = cpu_to_le32( cmnd.rw.reftag = cpu_to_le32(
nvme_block_nr(ns, blk_rq_pos(req))); nvme_block_nr(ns, blk_rq_pos(req)));
break; break;
} }
} else if (ns->ms) } else if (ns->ms)
control |= NVME_RW_PRINFO_PRACT; control |= NVME_RW_PRINFO_PRACT;
cmnd->rw.control = cpu_to_le16(control); cmnd.rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
if (++nvmeq->sq_tail == nvmeq->q_depth) __nvme_submit_cmd(nvmeq, &cmnd);
nvmeq->sq_tail = 0;
writel(nvmeq->sq_tail, nvmeq->q_db);
return 0; return 0;
} }