qed: changes to ILT to support XRC

The first ILT page for the TSDM client is allocated for XRC-SRQs.
For regular SRQs, skip the first ILT page, which is reserved for
XRC-SRQs.

Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Yuval Bason <ybason@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Yuval Basson 2020-05-19 23:51:25 +03:00 committed by David S. Miller
parent d8bed686ab
commit b8204ad878
4 changed files with 64 additions and 14 deletions

View File

@ -110,6 +110,7 @@ struct src_ent {
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context)) #define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))
#define TYPE0_TASK_CXT_SIZE(p_hwfn) \ #define TYPE0_TASK_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn) ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
@ -293,18 +294,40 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL; return NULL;
} }
static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
u32 num_srqs, u32 num_xrc_srqs)
{ {
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs; p_mgr->srq_count = num_srqs;
p_mgr->xrc_srq_count = num_xrc_srqs;
} }
u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
enum ilt_clients ilt_client)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];
return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
}
static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
{
u32 page_size;
page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
return page_size / XRC_SRQ_CXT_SIZE;
}
u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
{ {
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
u32 total_srqs;
return p_mgr->srq_count; total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;
return total_srqs;
} }
/* set the iids count per protocol */ /* set the iids count per protocol */
@ -692,7 +715,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
} }
/* TSDM (SRQ CONTEXT) */ /* TSDM (SRQ CONTEXT) */
total = qed_cxt_get_srq_count(p_hwfn); total = qed_cxt_get_total_srq_count(p_hwfn);
if (total) { if (total) {
p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]); p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
@ -1962,11 +1985,9 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
struct qed_rdma_pf_params *p_params, struct qed_rdma_pf_params *p_params,
u32 num_tasks) u32 num_tasks)
{ {
u32 num_cons, num_qps, num_srqs; u32 num_cons, num_qps;
enum protocol_type proto; enum protocol_type proto;
num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
@ -1989,6 +2010,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
} }
if (num_cons && num_tasks) { if (num_cons && num_tasks) {
u32 num_srqs, num_xrc_srqs;
qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0); qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
/* Deliberately passing ROCE for tasks id. This is because /* Deliberately passing ROCE for tasks id. This is because
@ -1997,7 +2020,13 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE, qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
QED_CXT_ROCE_TID_SEG, 1, QED_CXT_ROCE_TID_SEG, 1,
num_tasks, false); num_tasks, false);
qed_cxt_set_srq_count(p_hwfn, num_srqs);
num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
/* XRC SRQs populate a single ILT page */
num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);
qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
} else { } else {
DP_INFO(p_hwfn->cdev, DP_INFO(p_hwfn->cdev,
"RDMA personality used without setting params!\n"); "RDMA personality used without setting params!\n");
@ -2163,10 +2192,17 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
p_blk = &p_cli->pf_blks[CDUC_BLK]; p_blk = &p_cli->pf_blks[CDUC_BLK];
break; break;
case QED_ELEM_SRQ: case QED_ELEM_SRQ:
/* The first ILT page is not used for regular SRQs. Skip it. */
iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
elem_size = SRQ_CXT_SIZE; elem_size = SRQ_CXT_SIZE;
p_blk = &p_cli->pf_blks[SRQ_BLK]; p_blk = &p_cli->pf_blks[SRQ_BLK];
break; break;
case QED_ELEM_XRC_SRQ:
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
elem_size = XRC_SRQ_CXT_SIZE;
p_blk = &p_cli->pf_blks[SRQ_BLK];
break;
case QED_ELEM_TASK: case QED_ELEM_TASK:
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
@ -2386,8 +2422,12 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
return rc; return rc;
/* Free TSDM CXT */ /* Free TSDM CXT */
rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0, rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
qed_cxt_get_srq_count(p_hwfn)); p_hwfn->p_cxt_mngr->xrc_srq_count);
rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
p_hwfn->p_cxt_mngr->xrc_srq_count,
p_hwfn->p_cxt_mngr->srq_count);
return rc; return rc;
} }

View File

@ -82,7 +82,8 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
enum qed_cxt_elem_type { enum qed_cxt_elem_type {
QED_ELEM_CXT, QED_ELEM_CXT,
QED_ELEM_SRQ, QED_ELEM_SRQ,
QED_ELEM_TASK QED_ELEM_TASK,
QED_ELEM_XRC_SRQ,
}; };
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
@ -235,7 +236,6 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type); enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
enum protocol_type type); enum protocol_type type);
u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0 #define QED_CTX_WORKING_MEM 0
@ -358,6 +358,7 @@ struct qed_cxt_mngr {
/* total number of SRQ's for this hwfn */ /* total number of SRQ's for this hwfn */
u32 srq_count; u32 srq_count;
u32 xrc_srq_count;
/* Maximal number of L2 steering filters */ /* Maximal number of L2 steering filters */
u32 arfs_count; u32 arfs_count;
@ -372,4 +373,9 @@ u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn); u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn); u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
enum ilt_clients ilt_client);
u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);
#endif #endif

View File

@ -2269,6 +2269,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
/* EQ */ /* EQ */
n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
enum protocol_type rdma_proto; enum protocol_type rdma_proto;
if (QED_IS_ROCE_PERSONALITY(p_hwfn)) if (QED_IS_ROCE_PERSONALITY(p_hwfn))
@ -2279,7 +2280,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
rdma_proto, rdma_proto,
NULL) * 2; NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; /* EQ should be able to get events from all SRQs
* at the same time
*/
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons = num_cons =
qed_cxt_get_proto_cid_count(p_hwfn, qed_cxt_get_proto_cid_count(p_hwfn,

View File

@ -272,7 +272,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
} }
/* Allocate bitmap for srqs */ /* Allocate bitmap for srqs */
p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn); p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
p_rdma_info->num_srqs, "SRQ"); p_rdma_info->num_srqs, "SRQ");
if (rc) { if (rc) {