qed: Hold a single array for SBs

Today a PF holds two different arrays - one holding information
about the HW configuration and one holding information about
the SBs used by the protocol drivers.
These arrays aren't really connected - e.g., a protocol driver
initializing a given SB would not mark the same SB as occupied
in the HW shadow array.

Move to a single array [at least for PFs] that holds the mapping
of the driver-protocol SBs onto the HW entries they configure.

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Mintz, Yuval, 2017-06-01 15:29:09 +03:00; committed by David S. Miller
commit 50a207147f (parent 09b6b14749)
8 changed files with 103 additions and 27 deletions
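In short, the change collapses the bookkeeping: instead of keeping a separate sbs_info[] shadow next to the description of the IGU CAM, each CAM entry itself carries the protocol driver's sb_info pointer plus a free/used status bit, so registering and releasing an SB touches a single structure and the free counter cannot drift out of sync. The following is a minimal, compilable sketch of that idea only; the structure and helper names are simplified stand-ins, not the driver's actual API.

    /* Sketch only: simplified stand-ins for the qed IGU bookkeeping. */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_IGU_ENTRIES 16
    #define STATUS_FREE     0x01
    #define STATUS_VALID    0x02

    struct sb_info {                        /* stand-in for struct qed_sb_info   */
            uint16_t igu_sb_id;             /* CAM entry this SB is bound to     */
    };

    struct igu_entry {                      /* stand-in for struct qed_igu_block */
            uint8_t status;                 /* STATUS_FREE / STATUS_VALID flags  */
            uint8_t vector_number;          /* 0 is reserved for the default SB  */
            struct sb_info *sb_info;        /* client SB occupying this entry    */
    };

    struct igu_info {                       /* stand-in for struct qed_igu_info  */
            struct igu_entry entry[NUM_IGU_ENTRIES];
            uint16_t free_cnt;
    };

    /* Registering an SB marks the HW entry it configures as used. */
    static void sb_register(struct igu_info *p, struct sb_info *sb)
    {
            struct igu_entry *e = &p->entry[sb->igu_sb_id];

            e->sb_info = sb;
            e->status &= ~STATUS_FREE;
            p->free_cnt--;
    }

    /* Releasing it drops the reference and frees the same entry again. */
    static void sb_unregister(struct igu_info *p, struct sb_info *sb)
    {
            struct igu_entry *e = &p->entry[sb->igu_sb_id];

            e->sb_info = NULL;
            e->status |= STATUS_FREE;
            p->free_cnt++;
    }

    int main(void)
    {
            struct igu_info igu = { .free_cnt = NUM_IGU_ENTRIES };
            struct sb_info sb = { .igu_sb_id = 3 };
            unsigned int i;

            for (i = 0; i < NUM_IGU_ENTRIES; i++) {
                    igu.entry[i].status = STATUS_FREE | STATUS_VALID;
                    igu.entry[i].vector_number = (uint8_t)i;
            }

            sb_register(&igu, &sb);
            printf("free entries after register: %u\n", (unsigned int)igu.free_cnt);
            sb_unregister(&igu, &sb);
            printf("free entries after release: %u\n", (unsigned int)igu.free_cnt);
            return 0;
    }

This only illustrates why a single array keeps the "occupied" state and the client pointer together; the real structures carry more state (function id, PF/VF ownership, the default-SB flag, and so on), as the hunks below show.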


@@ -495,10 +495,6 @@ struct qed_hwfn {
bool b_rdma_enabled_in_prs;
u32 rdma_prs_search_reg;
/* Array of sb_info of all status blocks */
struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
u16 num_sbs;
struct qed_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not*/


@@ -183,7 +183,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data->q_params.queue_relative_offset = (u8)tmp;
for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
u16 igu_sb_id;
igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
tmp = cpu_to_le16(igu_sb_id);
p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
}


@@ -1452,7 +1452,7 @@ static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
return QED_SB_INVALID_IDX;
}
static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -1485,8 +1485,19 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
if (sb_id != QED_SP_SB_ID) {
p_hwfn->sbs_info[sb_id] = sb_info;
p_hwfn->num_sbs++;
if (IS_PF(p_hwfn->cdev)) {
struct qed_igu_info *p_info;
struct qed_igu_block *p_block;
p_info = p_hwfn->hw_info.p_igu_info;
p_block = &p_info->entry[sb_info->igu_sb_id];
p_block->sb_info = sb_info;
p_block->status &= ~QED_IGU_STATUS_FREE;
p_info->usage.free_cnt--;
} else {
qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
}
}
sb_info->cdev = p_hwfn->cdev;
@@ -1515,20 +1526,35 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info, u16 sb_id)
{
if (sb_id == QED_SP_SB_ID) {
DP_ERR(p_hwfn, "Do Not free sp sb using this function");
return -EINVAL;
}
struct qed_igu_block *p_block;
struct qed_igu_info *p_info;
if (!sb_info)
return 0;
/* zero status block and ack counter */
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
if (p_hwfn->sbs_info[sb_id] != NULL) {
p_hwfn->sbs_info[sb_id] = NULL;
p_hwfn->num_sbs--;
if (IS_VF(p_hwfn->cdev)) {
qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
return 0;
}
p_info = p_hwfn->hw_info.p_igu_info;
p_block = &p_info->entry[sb_info->igu_sb_id];
/* Vector 0 is reserved to Default SB */
if (!p_block->vector_number) {
DP_ERR(p_hwfn, "Do Not free sp sb using this function");
return -EINVAL;
}
/* Lose reference to client's SB info, and fix counters */
p_block->sb_info = NULL;
p_block->status |= QED_IGU_STATUS_FREE;
p_info->usage.free_cnt++;
return 0;
}
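Two release-path details in the hunk above are worth noting: the function can no longer rely on the caller's sb_id alone (hence the early return for a NULL sb_info and the separate VF branch), and the default/slowpath SB is now recognized from the IGU entry itself, via vector_number 0, rather than by comparing sb_id against QED_SP_SB_ID. Reusing the stand-in types from the sketch near the top, the guarded release looks roughly like this (illustrative only):

    /* Sketch: refuse to release the default SB, then free the CAM entry. */
    static int sb_release_sketch(struct igu_info *p, struct sb_info *sb)
    {
            struct igu_entry *e;

            if (!sb)
                    return 0;       /* nothing was registered for this slot */

            e = &p->entry[sb->igu_sb_id];
            if (!e->vector_number)
                    return -1;      /* vector 0 == default SB, like the -EINVAL above */

            e->sb_info = NULL;
            e->status |= STATUS_FREE;
            p->free_cnt++;
            return 0;
    }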


@@ -202,18 +202,20 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
#define QED_SB_INVALID_IDX 0xffff
struct qed_igu_block {
u8 status;
u8 status;
#define QED_IGU_STATUS_FREE 0x01
#define QED_IGU_STATUS_VALID 0x02
#define QED_IGU_STATUS_PF 0x04
#define QED_IGU_STATUS_DSB 0x08
u8 vector_number;
u8 function_id;
u8 is_pf;
u8 vector_number;
u8 function_id;
u8 is_pf;
/* Index inside IGU [meant for back reference] */
u16 igu_sb_id;
u16 igu_sb_id;
struct qed_sb_info *sb_info;
};
struct qed_igu_info {
@@ -224,7 +226,16 @@ struct qed_igu_info {
};
/* TODO Names of function may change... */
/**
* @brief Translate the weakly-defined client sb-id into an IGU sb-id
*
* @param p_hwfn
* @param sb_id - user provided sb_id
*
* @return an index inside IGU CAM where the SB resides
*/
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
* @brief return a pointer to an unused valid SB
*

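The header hunk above documents two lookups over the unified array: translating a client's weakly-defined sb_id into an IGU CAM index, and returning "a pointer to an unused valid SB". With the status flags and the sb_info pointer living directly in each CAM entry, the latter essentially reduces to scanning for an entry that is both valid and free. A rough, non-authoritative sketch of such a scan, again reusing the stand-in types from the first example (a real lookup would presumably also have to respect PF/VF ownership of the entry):

    /* Sketch: return the first CAM entry that is valid and not yet taken. */
    static struct igu_entry *find_unused_valid_sb(struct igu_info *p)
    {
            unsigned int i;

            for (i = 0; i < NUM_IGU_ENTRIES; i++) {
                    struct igu_entry *e = &p->entry[i];

                    if ((e->status & (STATUS_VALID | STATUS_FREE)) ==
                        (STATUS_VALID | STATUS_FREE))
                            return e;
            }

            return NULL;
    }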

@@ -220,7 +220,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
for (i = 0; i < p_params->num_queues; i++) {
val = p_hwfn->sbs_info[i]->igu_sb_id;
val = qed_get_igu_sb_id(p_hwfn, i);
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
}


@@ -581,6 +581,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent;
u32 cnq_id, sb_id;
u16 igu_sb_id;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
@@ -612,10 +613,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
p_cnq_params = &p_ramrod->cnq_params[cnq_id];
p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
p_cnq_params->sb_num =
cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;


@@ -792,9 +792,12 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
req->only_untagged = only_untagged;
/* status blocks */
for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
if (p_hwfn->sbs_info[i])
req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
if (p_sb)
req->sb_addr[i] = p_sb->sb_phys;
}
/* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset,
@@ -1240,6 +1243,24 @@ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
u16 sb_id, struct qed_sb_info *p_sb)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
if (!p_iov) {
DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
return;
}
if (sb_id >= PFVF_MAX_SBS_PER_VF) {
DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
return;
}
p_iov->sbs_info[sb_id] = p_sb;
}
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;


@@ -627,6 +627,14 @@ struct qed_vf_iov {
* this has to be propagated as it affects the fastpath.
*/
bool b_pre_fp_hsi;
/* Current day VFs are passing the SBs physical address on vport
* start, and as they lack an IGU mapping they need to store the
* addresses of previously registered SBs.
* Even if we were to change configuration flow, due to backward
* compatibility [with older PFs] we'd still need to store these.
*/
struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
};
#ifdef CONFIG_QED_SRIOV
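The new comment in struct qed_vf_iov above spells out why a VF still keeps a private cache even though the PF now tracks everything in the IGU entries: the VF has no view of the IGU CAM, so it stores the sb_info pointers it registered and replays their physical addresses when building the vport-start request (see the qed_vf_pf_vport_start hunk earlier). A small sketch of that cache, with made-up names rather than the driver's own:

    /* Sketch: VF-side cache of registered SBs, replayed at vport start. */
    #include <stdint.h>

    #define MAX_VF_SBS 64                   /* stands in for PFVF_MAX_SBS_PER_VF */

    struct vf_sb {
            uint64_t sb_phys;               /* DMA address of the SB memory */
    };

    struct vf_sb_cache {
            const struct vf_sb *sbs[MAX_VF_SBS];
    };

    /* Store (or, with sb == NULL, forget) a configured SB. */
    static void vf_set_sb(struct vf_sb_cache *c, uint16_t sb_id, const struct vf_sb *sb)
    {
            if (sb_id < MAX_VF_SBS)
                    c->sbs[sb_id] = sb;
    }

    /* At vport start, replay the physical addresses of whatever is registered. */
    static void vf_fill_vport_start_sbs(const struct vf_sb_cache *c,
                                        uint64_t *sb_addr, uint16_t num_sbs)
    {
            uint16_t i;

            for (i = 0; i < num_sbs && i < MAX_VF_SBS; i++)
                    if (c->sbs[i])
                            sb_addr[i] = c->sbs[i]->sb_phys;
    }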
@@ -836,6 +844,16 @@ int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
*/
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
* @brief Stores [or removes] a configured sb_info.
*
* @param p_hwfn
* @param sb_id - zero-based SB index [for fastpath]
* @param sb_info - may be NULL [during removal].
*/
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
u16 sb_id, struct qed_sb_info *p_sb);
/**
* @brief qed_vf_pf_vport_start - perform vport start for VF.
*