nvme: use a single NVME_AQ_DEPTH and relax it to 32
There is no need to differentiate fabrics from pci/loop; also lower it to 32, as we don't really need 256 inflight admin commands. Signed-off-by: Sagi Grimberg <sagi@grimberg.me> Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Max Gurtovoy <maxg@mellanox.com> Signed-off-by: Keith Busch <keith.busch@intel.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
6bfe04255d
commit
7aa1f42752
|
@@ -392,13 +392,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
|
||||||
cmd.connect.opcode = nvme_fabrics_command;
|
cmd.connect.opcode = nvme_fabrics_command;
|
||||||
cmd.connect.fctype = nvme_fabrics_type_connect;
|
cmd.connect.fctype = nvme_fabrics_type_connect;
|
||||||
cmd.connect.qid = 0;
|
cmd.connect.qid = 0;
|
||||||
|
cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
|
||||||
/*
|
|
||||||
* fabrics spec sets a minimum of depth 32 for admin queue,
|
|
||||||
* so set the queue with this depth always until
|
|
||||||
* justification otherwise.
|
|
||||||
*/
|
|
||||||
cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Set keep-alive timeout in seconds granularity (ms * 1000)
|
* Set keep-alive timeout in seconds granularity (ms * 1000)
|
||||||
|
|
|
@@ -36,7 +36,7 @@
|
||||||
*/
|
*/
|
||||||
#define NVME_FC_NR_AEN_COMMANDS 1
|
#define NVME_FC_NR_AEN_COMMANDS 1
|
||||||
#define NVME_FC_AQ_BLKMQ_DEPTH \
|
#define NVME_FC_AQ_BLKMQ_DEPTH \
|
||||||
(NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
|
(NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
|
||||||
#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
|
#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
|
||||||
|
|
||||||
enum nvme_fc_queue_flags {
|
enum nvme_fc_queue_flags {
|
||||||
|
|
|
@@ -36,7 +36,6 @@
|
||||||
#include "nvme.h"
|
#include "nvme.h"
|
||||||
|
|
||||||
#define NVME_Q_DEPTH 1024
|
#define NVME_Q_DEPTH 1024
|
||||||
#define NVME_AQ_DEPTH 256
|
|
||||||
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
|
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
|
||||||
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
|
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
|
||||||
|
|
||||||
|
|
|
@@ -48,7 +48,7 @@
|
||||||
*/
|
*/
|
||||||
#define NVME_RDMA_NR_AEN_COMMANDS 1
|
#define NVME_RDMA_NR_AEN_COMMANDS 1
|
||||||
#define NVME_RDMA_AQ_BLKMQ_DEPTH \
|
#define NVME_RDMA_AQ_BLKMQ_DEPTH \
|
||||||
(NVMF_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
|
(NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
|
||||||
|
|
||||||
struct nvme_rdma_device {
|
struct nvme_rdma_device {
|
||||||
struct ib_device *dev;
|
struct ib_device *dev;
|
||||||
|
@@ -719,7 +719,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
|
||||||
if (ret)
|
if (ret)
|
||||||
goto requeue;
|
goto requeue;
|
||||||
|
|
||||||
ret = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
|
ret = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto requeue;
|
goto requeue;
|
||||||
|
|
||||||
|
@@ -1291,8 +1291,8 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
|
||||||
* specified by the Fabrics standard.
|
* specified by the Fabrics standard.
|
||||||
*/
|
*/
|
||||||
if (priv.qid == 0) {
|
if (priv.qid == 0) {
|
||||||
priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
|
priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
|
||||||
priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
|
priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
* current interpretation of the fabrics spec
|
* current interpretation of the fabrics spec
|
||||||
|
@@ -1530,7 +1530,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
|
||||||
{
|
{
|
||||||
int error;
|
int error;
|
||||||
|
|
||||||
error = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
|
error = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
|
||||||
if (error)
|
if (error)
|
||||||
return error;
|
return error;
|
||||||
|
|
||||||
|
|
|
@@ -53,7 +53,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
|
||||||
e->portid = port->disc_addr.portid;
|
e->portid = port->disc_addr.portid;
|
||||||
/* we support only dynamic controllers */
|
/* we support only dynamic controllers */
|
||||||
e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
|
e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
|
||||||
e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
|
e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
|
||||||
e->subtype = type;
|
e->subtype = type;
|
||||||
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
|
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
|
||||||
memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
|
memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
|
||||||
|
|
|
@@ -21,8 +21,6 @@
|
||||||
#include "../host/nvme.h"
|
#include "../host/nvme.h"
|
||||||
#include "../host/fabrics.h"
|
#include "../host/fabrics.h"
|
||||||
|
|
||||||
#define NVME_LOOP_AQ_DEPTH 256
|
|
||||||
|
|
||||||
#define NVME_LOOP_MAX_SEGMENTS 256
|
#define NVME_LOOP_MAX_SEGMENTS 256
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -31,7 +29,7 @@
|
||||||
*/
|
*/
|
||||||
#define NVME_LOOP_NR_AEN_COMMANDS 1
|
#define NVME_LOOP_NR_AEN_COMMANDS 1
|
||||||
#define NVME_LOOP_AQ_BLKMQ_DEPTH \
|
#define NVME_LOOP_AQ_BLKMQ_DEPTH \
|
||||||
(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
|
(NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
|
||||||
|
|
||||||
struct nvme_loop_iod {
|
struct nvme_loop_iod {
|
||||||
struct nvme_request nvme_req;
|
struct nvme_request nvme_req;
|
||||||
|
|
|
@@ -1027,7 +1027,7 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
|
||||||
queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
|
queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
|
||||||
queue->send_queue_size = le16_to_cpu(req->hrqsize);
|
queue->send_queue_size = le16_to_cpu(req->hrqsize);
|
||||||
|
|
||||||
if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
|
if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
|
||||||
return NVME_RDMA_CM_INVALID_HSQSIZE;
|
return NVME_RDMA_CM_INVALID_HSQSIZE;
|
||||||
|
|
||||||
/* XXX: Should we enforce some kind of max for IO queues? */
|
/* XXX: Should we enforce some kind of max for IO queues? */
|
||||||
|
|
|
@@ -87,7 +87,7 @@ enum {
|
||||||
NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
|
NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
|
||||||
};
|
};
|
||||||
|
|
||||||
#define NVMF_AQ_DEPTH 32
|
#define NVME_AQ_DEPTH 32
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
NVME_REG_CAP = 0x0000, /* Controller Capabilities */
|
NVME_REG_CAP = 0x0000, /* Controller Capabilities */
|
||||||
|
|
Loading…
Reference in New Issue