Merge branch 'qed-Advance-to-FW-8.33.1.0'
Tomer Tayar says:

====================
qed*: Advance to FW 8.33.1.0

This series advances all qed* drivers to use firmware 8.33.1.0, which brings
new capabilities and initial support for new HW. The changes are mostly in
qed, and include changes in the FW interface files as well as updates to the
FW initialization and debug collection code. The protocol drivers have minor
functional changes for this firmware.

Patch 1 - Rearranges and refactors the FW interface files in preparation for
          the new FW (no functional change).
Patch 2 - Prepares the code for support of the new HW (no functional change).
Patch 3 - Actual utilization of the new FW.
Patch 4 - Advances the drivers' version.

v3->v4: Fix a compilation issue reported by the kbuild robot (dependency on CRC8).
v2->v3: Resend the series with a fixed title in the cover letter.
v1->v2:
 - Break the previous single patch into several patches.
 - Fix compilation issues reported by the kbuild robot.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit 4e3b95f198
@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

@ -164,6 +164,13 @@ struct rdma_srq_sge {
	__le32 l_key;
 };
 
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+	__le16 icid; /* internal CID */
+	u8 agg_flags; /* aggregative flags */
+	u8 reserved;
+};
+
 /* Rdma doorbell data for SQ and RQ */
 struct rdma_pwm_val16_data {
	__le16 icid;
@ -184,8 +191,12 @@ struct rdma_pwm_val32_data {
 #define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
	__le32 value;
 };
 
@ -492,9 +503,11 @@ struct rdma_sq_fmr_wqe {
 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
+	__le32 reserved5;
 };
 
 /* First element (16 bytes) of fmr wqe */
@ -572,9 +585,11 @@ struct rdma_sq_fmr_wqe_3rd {
 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
+	__le32 reserved5;
 };
 
 struct rdma_sq_local_inv_wqe {
@ -618,8 +633,10 @@ struct rdma_sq_rdma_wqe {
 #define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
	u8 wqe_size;
	u8 prev_wqe_size;
	struct regpair remote_va;

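The *_MASK/*_SHIFT pairs above are meant to be applied through the driver's SET_FIELD()/GET_FIELD() helpers when doorbell and WQE words are built. A minimal, self-contained sketch of how such a pair is used follows; the set_field() and mark_iwarp() helpers are illustrative stand-ins and are not part of the patch.

/* Hedged sketch: applying a MASK/SHIFT pair to a 32-bit field. The helper
 * below is an assumption for illustration; the driver itself uses its
 * SET_FIELD()/GET_FIELD() macros from the HSI headers.
 */
#include <stdint.h>

#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK  0x1
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3

/* Clear the named field inside 'word', then write 'val' into it. */
static inline uint32_t set_field(uint32_t word, uint32_t mask,
				 uint32_t shift, uint32_t val)
{
	word &= ~(mask << shift);
	word |= (val & mask) << shift;
	return word;
}

/* Example: mark a doorbell value word as belonging to an iWARP connection. */
static inline uint32_t mark_iwarp(uint32_t params)
{
	return set_field(params,
			 RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK,
			 RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT, 1);
}
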
@ -85,6 +85,7 @@ config QED
	tristate "QLogic QED 25/40/100Gb core driver"
	depends on PCI
	select ZLIB_INFLATE
+	select CRC8
	---help---
	  This enables the support for ...

@ -53,9 +53,9 @@
 extern const struct qed_common_ops qed_common_ops_pass;
 
 #define QED_MAJOR_VERSION 8
-#define QED_MINOR_VERSION 10
-#define QED_REVISION_VERSION 11
-#define QED_ENGINEERING_VERSION 21
+#define QED_MINOR_VERSION 33
+#define QED_REVISION_VERSION 0
+#define QED_ENGINEERING_VERSION 20
 
 #define QED_VERSION \
	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
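For reference, the bumped components pack into the single 32-bit QED_VERSION word as sketched below. The macro above is truncated in this view, so the bit positions assumed for the last two fields (bits 15:8 and 7:0) are an illustration, not something taken from the patch.

/* Hedged sketch: packing driver version 8.33.0.20 into one 32-bit word.
 * The 8-bit and 0-bit shifts for the revision/engineering fields are an
 * assumption made only for this example.
 */
#include <stdio.h>

#define QED_MAJOR_VERSION       8
#define QED_MINOR_VERSION       33
#define QED_REVISION_VERSION    0
#define QED_ENGINEERING_VERSION 20

#define QED_VERSION							\
	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) |	\
	 (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)

int main(void)
{
	printf("QED_VERSION = 0x%08x\n", QED_VERSION);	/* 0x08210014 */
	return 0;
}
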
@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
	return sw_fid;
 }
 
 #define PURE_LB_TC 8
-#define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,

@ -86,22 +86,22 @@
 
 /* connection context union */
 union conn_context {
-	struct core_conn_context core_ctx;
-	struct eth_conn_context eth_ctx;
-	struct iscsi_conn_context iscsi_ctx;
-	struct fcoe_conn_context fcoe_ctx;
-	struct roce_conn_context roce_ctx;
+	struct e4_core_conn_context core_ctx;
+	struct e4_eth_conn_context eth_ctx;
+	struct e4_iscsi_conn_context iscsi_ctx;
+	struct e4_fcoe_conn_context fcoe_ctx;
+	struct e4_roce_conn_context roce_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
 union type0_task_context {
-	struct iscsi_task_context iscsi_ctx;
-	struct fcoe_task_context fcoe_ctx;
+	struct e4_iscsi_task_context iscsi_ctx;
+	struct e4_fcoe_task_context fcoe_ctx;
 };
 
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
-	struct rdma_task_context roce_ctx;
+	struct e4_rdma_task_context roce_ctx;
 };
 
 struct src_ent {
@ -110,7 +110,7 @@ struct src_ent {
 };
 
 #define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)
 
 #define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 
	qed_cxt_qm_iids(p_hwfn, &qm_iids);
-	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+	total = qed_qm_pf_mem_size(qm_iids.cids,
				   qm_iids.vf_cids, qm_iids.tids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);
@ -1496,20 +1496,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
	}
 }
 
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, bool is_pf_loading)
 {
-	struct qed_qm_pf_rt_init_params params;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+	struct qed_qm_pf_rt_init_params params;
+	struct qed_mcp_link_state *p_link;
	struct qed_qm_iids iids;
 
	memset(&iids, 0, sizeof(iids));
	qed_cxt_qm_iids(p_hwfn, &iids);
 
+	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
	memset(&params, 0, sizeof(params));
	params.port_id = p_hwfn->port_id;
	params.pf_id = p_hwfn->rel_pf_id;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.is_first_pf = p_hwfn->first_on_engine;
+	params.is_pf_loading = is_pf_loading;
	params.num_pf_cids = iids.cids;
	params.num_vf_cids = iids.vf_cids;
	params.num_tids = iids.tids;
@ -1520,6 +1524,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
	params.num_vports = qm_info->num_vports;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
+	params.link_speed = p_link->speed;
	params.pq_params = qm_info->qm_pq_params;
	params.vport_params = qm_info->qm_vport_params;
 
@ -1883,7 +1888,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
 
 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	qed_qm_init_pf(p_hwfn, p_ptt);
+	qed_qm_init_pf(p_hwfn, p_ptt, true);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_cdu_init_pf(p_hwfn);
@ -2326,7 +2331,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
	for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
		elem = (union type1_task_context *)elem_start;
		SET_FIELD(elem->roce_ctx.tdif_context.flags1,
-			  TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+			  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
		elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
	}
 }

@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 *
 * @param p_hwfn
 * @param p_ptt
+ * @param is_pf_loading
 */
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, bool is_pf_loading);
 
 /**
 * @brief Reconfigures QM pf on the fly

@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
				   struct pf_update_ramrod_data *p_dest)
 {
	struct protocol_dcb_data *p_dcb_data;
-	bool update_flag = false;
-
-	p_dest->pf_id = p_src->pf_id;
+	u8 update_flag;
 
	update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
	p_dest->update_fcoe_dcb_data_mode = update_flag;

File diff suppressed because it is too large
@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
 /* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
	qed_init_clear_rt_data(p_hwfn);
 
	/* prepare QM portion of runtime array */
-	qed_qm_init_pf(p_hwfn, p_ptt);
+	qed_qm_init_pf(p_hwfn, p_ptt, false);
 
	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}
 
-	/* Protocl Configuration */
+	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
	if (rc)
		return rc;
 
+	/* Sanity check before the PF init sequence that uses DMAE */
+	rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+	if (rc)
+		return rc;
+
	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
		/* No need for a case for QED_CMDQS_CQS since
		 * CNQ/CMDQS are the same resource.
		 */
-		resc_max_val = NUM_OF_CMDQS_CQS;
+		resc_max_val = NUM_OF_GLOBAL_QUEUES;
		break;
	case QED_RDMA_STATS_QUEUE:
		resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
-		*p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
		break;
	case QED_RDMA_STATS_QUEUE:
		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :

@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
|
|||
struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
|
||||
struct fcoe_init_ramrod_params *p_ramrod = NULL;
|
||||
struct fcoe_init_func_ramrod_data *p_data;
|
||||
struct fcoe_conn_context *p_cxt = NULL;
|
||||
struct e4_fcoe_conn_context *p_cxt = NULL;
|
||||
struct qed_spq_entry *p_ent = NULL;
|
||||
struct qed_sp_init_data init_data;
|
||||
struct qed_cxt_info cxt_info;
|
||||
|
@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
|
|||
}
|
||||
p_cxt = cxt_info.p_cxt;
|
||||
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
|
||||
TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
|
||||
E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
|
||||
|
||||
fcoe_pf_params->dummy_icid = (u16)dummy_cid;
|
||||
|
||||
|
@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
|
|||
|
||||
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
|
||||
{
|
||||
struct fcoe_task_context *p_task_ctx = NULL;
|
||||
struct e4_fcoe_task_context *p_task_ctx = NULL;
|
||||
int rc;
|
||||
u32 i;
|
||||
|
||||
|
@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
|
|||
if (rc)
|
||||
continue;
|
||||
|
||||
memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
|
||||
memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
|
||||
SET_FIELD(p_task_ctx->timer_context.logical_client_0,
|
||||
TIMERS_CONTEXT_VALIDLC0, 1);
|
||||
SET_FIELD(p_task_ctx->timer_context.logical_client_1,
|
||||
TIMERS_CONTEXT_VALIDLC1, 1);
|
||||
SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
|
||||
TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
|
||||
E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
File diff suppressed because it is too large
@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
 
	return rc;
 }
 
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, const char *phase)
+{
+	u32 size = PAGE_SIZE / 2, val;
+	struct qed_dmae_params params;
+	int rc = 0;
+	dma_addr_t p_phys;
+	void *p_virt;
+	u32 *p_tmp;
+
+	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				    2 * size, &p_phys, GFP_KERNEL);
+	if (!p_virt) {
+		DP_NOTICE(p_hwfn,
+			  "DMAE sanity [%s]: failed to allocate memory\n",
+			  phase);
+		return -ENOMEM;
+	}
+
+	/* Fill the bottom half of the allocated memory with a known pattern */
+	for (p_tmp = (u32 *)p_virt;
+	     p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+		/* Save the address itself as the value */
+		val = (u32)(uintptr_t)p_tmp;
+		*p_tmp = val;
+	}
+
+	/* Zero the top half of the allocated memory */
+	memset((u8 *)p_virt + size, 0, size);
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_SP,
+		   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+		   phase,
+		   (u64)p_phys,
+		   p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+	memset(&params, 0, sizeof(params));
+	rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+				size / 4 /* size_in_dwords */, &params);
+	if (rc) {
+		DP_NOTICE(p_hwfn,
+			  "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+			  phase, rc);
+		goto out;
+	}
+
+	/* Verify that the top half of the allocated memory has the pattern */
+	for (p_tmp = (u32 *)((u8 *)p_virt + size);
+	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+		/* The corresponding address in the bottom half */
+		val = (u32)(uintptr_t)p_tmp - size;
+
+		if (*p_tmp != val) {
+			DP_NOTICE(p_hwfn,
+				  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+				  phase,
+				  (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+				  p_tmp, *p_tmp, val);
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
+out:
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+	return rc;
+}

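qed_dmae_sanity() above fills the lower half of a DMA-coherent buffer with each dword's own address, DMAs it over the zeroed upper half, and checks that every upper-half dword now holds the address of its lower-half counterpart. A host-only sketch of that fill/copy/verify pattern follows; the memcpy() stands in for the real qed_dmae_host2host() transfer and the helper name is made up for the example.

/* Hedged, host-only sketch of the qed_dmae_sanity() pattern check. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int dmae_sanity_sketch(size_t half_size)
{
	uint8_t *buf = malloc(2 * half_size);
	uint32_t *p;
	int rc = 0;

	if (!buf)
		return -1;

	/* Fill the bottom half with a known pattern: the dword's address. */
	for (p = (uint32_t *)buf; p < (uint32_t *)(buf + half_size); p++)
		*p = (uint32_t)(uintptr_t)p;

	/* Zero the top half, then "transfer" bottom -> top. */
	memset(buf + half_size, 0, half_size);
	memcpy(buf + half_size, buf, half_size); /* qed_dmae_host2host() */

	/* Verify: each top-half dword must hold its bottom-half address. */
	for (p = (uint32_t *)(buf + half_size);
	     p < (uint32_t *)(buf + 2 * half_size); p++) {
		uint32_t expected = (uint32_t)(uintptr_t)p -
				    (uint32_t)half_size;

		if (*p != expected) {
			rc = -1;
			break;
		}
	}

	free(buf);
	return rc;
}
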
@ -299,4 +299,8 @@ union qed_qm_pq_params {
 
 int qed_init_fw_data(struct qed_dev *cdev,
		     const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, const char *phase);
+
 #endif

File diff suppressed because it is too large
@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 }
 
 /* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct init_callback_op *p_cmd)
 {
-	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+	int rc;
+
+	switch (p_cmd->callback_id) {
+	case DMAE_READY_CB:
+		rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+			  p_cmd->callback_id);
+		return -EINVAL;
+	}
+
+	return rc;
 }
 
 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
		break;
 
	case INIT_OP_CALLBACK:
-		qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+		rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
		break;
	}
 

@ -62,7 +62,7 @@ struct qed_sb_sp_info {
	struct qed_sb_info sb_info;
 
	/* per protocol index data */
-	struct qed_pi_info pi_info_arr[PIS_PER_SB];
+	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
 };
 
 enum qed_attention_type {
@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
 #define ATTENTION_LENGTH_SHIFT (4)
 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
				 ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT)
 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
			    ATTENTION_PARITY)
@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
	if (IS_VF(p_hwfn->cdev))
		return;
 
-	sb_offset = igu_sb_id * PIS_PER_SB;
+	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
 
	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);

@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 #define QED_SB_EVENT_MASK 0x0003
 
 #define SB_ALIGNED_SIZE(p_hwfn) \
-	ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+	ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
 
 #define QED_SB_INVALID_IDX 0xffff
 

@ -62,22 +62,6 @@
|
|||
#include "qed_sriov.h"
|
||||
#include "qed_reg_addr.h"
|
||||
|
||||
static int
|
||||
qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
|
||||
u8 fw_event_code,
|
||||
u16 echo, union event_ring_data *data, u8 fw_return_code)
|
||||
{
|
||||
if (p_hwfn->p_iscsi_info->event_cb) {
|
||||
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
|
||||
|
||||
return p_iscsi->event_cb(p_iscsi->event_context,
|
||||
fw_event_code, data);
|
||||
} else {
|
||||
DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
struct qed_iscsi_conn {
|
||||
struct list_head list_entry;
|
||||
bool free_on_delete;
|
||||
|
@ -105,7 +89,7 @@ struct qed_iscsi_conn {
|
|||
u8 local_mac[6];
|
||||
u8 remote_mac[6];
|
||||
u16 vlan_id;
|
||||
u8 tcp_flags;
|
||||
u16 tcp_flags;
|
||||
u8 ip_version;
|
||||
u32 remote_ip[4];
|
||||
u32 local_ip[4];
|
||||
|
@ -122,7 +106,6 @@ struct qed_iscsi_conn {
|
|||
u32 ss_thresh;
|
||||
u16 srtt;
|
||||
u16 rtt_var;
|
||||
u32 ts_time;
|
||||
u32 ts_recent;
|
||||
u32 ts_recent_age;
|
||||
u32 total_rt;
|
||||
|
@ -144,7 +127,6 @@ struct qed_iscsi_conn {
|
|||
u16 mss;
|
||||
u8 snd_wnd_scale;
|
||||
u8 rcv_wnd_scale;
|
||||
u32 ts_ticks_per_second;
|
||||
u16 da_timeout_value;
|
||||
u8 ack_frequency;
|
||||
|
||||
|
@ -161,6 +143,22 @@ struct qed_iscsi_conn {
|
|||
u8 abortive_dsconnect;
|
||||
};
|
||||
|
||||
static int
|
||||
qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
|
||||
u8 fw_event_code,
|
||||
u16 echo, union event_ring_data *data, u8 fw_return_code)
|
||||
{
|
||||
if (p_hwfn->p_iscsi_info->event_cb) {
|
||||
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
|
||||
|
||||
return p_iscsi->event_cb(p_iscsi->event_context,
|
||||
fw_event_code, data);
|
||||
} else {
|
||||
DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
|
||||
enum spq_mode comp_mode,
|
||||
|
@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
|
|||
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
|
||||
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
|
||||
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
|
||||
p_init->ooo_enable = p_params->ooo_enable;
|
||||
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
|
||||
p_params->ll2_ooo_queue_id;
|
||||
|
||||
p_init->func_params.log_page_size = p_params->log_page_size;
|
||||
val = p_params->num_tasks;
|
||||
p_init->func_params.num_tasks = cpu_to_le16(val);
|
||||
|
@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
|
|||
p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
|
||||
val = p_params->tx_sws_timer;
|
||||
p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
|
||||
p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
|
||||
p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
|
||||
|
||||
p_hwfn->p_iscsi_info->event_context = event_context;
|
||||
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
|
||||
|
@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
|
|||
int rc = 0;
|
||||
u32 dval;
|
||||
u16 wval;
|
||||
u8 i;
|
||||
u16 *p;
|
||||
u8 i;
|
||||
|
||||
/* Get SPQ entry */
|
||||
memset(&init_data, 0, sizeof(init_data));
|
||||
|
@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
|
|||
|
||||
p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
|
||||
|
||||
p_tcp->flags = p_conn->tcp_flags;
|
||||
p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
|
||||
p_tcp->ip_version = p_conn->ip_version;
|
||||
for (i = 0; i < 4; i++) {
|
||||
dval = p_conn->remote_ip[i];
|
||||
|
@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
|
|||
p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
|
||||
|
||||
p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
|
||||
p_tcp2->flags = p_conn->tcp_flags;
|
||||
p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
|
||||
|
||||
p_tcp2->ip_version = p_conn->ip_version;
|
||||
for (i = 0; i < 4; i++) {
|
||||
|
@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
|
|||
p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
|
||||
p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
|
||||
p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
|
||||
p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
|
||||
p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
|
||||
p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
|
||||
p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
|
||||
p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
|
||||
}
|
||||
|
||||
return qed_spq_post(p_hwfn, p_ent, NULL);
|
||||
|
@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
|
|||
}
|
||||
}
|
||||
|
||||
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
|
||||
struct qed_iscsi_conn *p_conn)
|
||||
static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
|
||||
{
|
||||
if (!p_conn->queue_cnts_virt_addr)
|
||||
goto nomem;
|
||||
|
@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
|
|||
rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
|
||||
|
||||
if (!rc)
|
||||
rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
|
||||
rc = qed_iscsi_setup_connection(p_conn);
|
||||
|
||||
if (rc) {
|
||||
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
|
||||
|
@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
|
|||
con->ss_thresh = conn_info->ss_thresh;
|
||||
con->srtt = conn_info->srtt;
|
||||
con->rtt_var = conn_info->rtt_var;
|
||||
con->ts_time = conn_info->ts_time;
|
||||
con->ts_recent = conn_info->ts_recent;
|
||||
con->ts_recent_age = conn_info->ts_recent_age;
|
||||
con->total_rt = conn_info->total_rt;
|
||||
|
@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
|
|||
con->mss = conn_info->mss;
|
||||
con->snd_wnd_scale = conn_info->snd_wnd_scale;
|
||||
con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
|
||||
con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
|
||||
con->da_timeout_value = conn_info->da_timeout_value;
|
||||
con->ack_frequency = conn_info->ack_frequency;
|
||||
|
||||
|
|
|
@ -64,14 +64,21 @@ struct mpa_v2_hdr {
|
|||
|
||||
#define QED_IWARP_INVALID_TCP_CID 0xffffffff
|
||||
#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
|
||||
#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
|
||||
#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)
|
||||
#define TIMESTAMP_HEADER_SIZE (12)
|
||||
#define QED_IWARP_MAX_FIN_RT_DEFAULT (2)
|
||||
|
||||
#define QED_IWARP_TS_EN BIT(0)
|
||||
#define QED_IWARP_DA_EN BIT(1)
|
||||
#define QED_IWARP_PARAM_CRC_NEEDED (1)
|
||||
#define QED_IWARP_PARAM_P2P (1)
|
||||
|
||||
#define QED_IWARP_DEF_MAX_RT_TIME (0)
|
||||
#define QED_IWARP_DEF_CWND_FACTOR (4)
|
||||
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
|
||||
#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
|
||||
#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
|
||||
|
||||
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
|
||||
u8 fw_event_code, u16 echo,
|
||||
union event_ring_data *data,
|
||||
|
@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
|
|||
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
|
||||
}
|
||||
|
||||
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
|
||||
struct iwarp_init_func_params *p_ramrod)
|
||||
void
|
||||
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
|
||||
struct iwarp_init_func_ramrod_data *p_ramrod)
|
||||
{
|
||||
p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
|
||||
p_ramrod->iwarp.ll2_ooo_q_index =
|
||||
RESC_START(p_hwfn, QED_LL2_QUEUE) +
|
||||
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
|
||||
|
||||
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
|
||||
|
@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
|
|||
tcp->ttl = 0x40;
|
||||
tcp->tos_or_tc = 0;
|
||||
|
||||
tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
|
||||
tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
|
||||
tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
|
||||
tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
|
||||
tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
|
||||
|
||||
tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
|
||||
tcp->connect_mode = ep->connect_mode;
|
||||
|
||||
|
@ -807,6 +826,7 @@ static int
|
|||
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
|
||||
{
|
||||
struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
|
||||
struct qed_iwarp_info *iwarp_info;
|
||||
struct qed_sp_init_data init_data;
|
||||
dma_addr_t async_output_phys;
|
||||
struct qed_spq_entry *p_ent;
|
||||
|
@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
|
|||
p_mpa_ramrod->common.reject = 1;
|
||||
}
|
||||
|
||||
iwarp_info = &p_hwfn->p_rdma_info->iwarp;
|
||||
p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
|
||||
p_mpa_ramrod->mode = ep->mpa_rev;
|
||||
SET_FIELD(p_mpa_ramrod->rtr_pref,
|
||||
IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
|
||||
|
@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
|||
/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
|
||||
iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
|
||||
ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
|
||||
iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
|
||||
iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
|
||||
iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
|
||||
|
||||
|
|
|
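A quick worked example of the window-scale math above, assuming the default QED_IWARP_RCV_WND_SIZE_DEF of 256 KB together with the new 0xffff minimum: ilog2() truncates, so the minimum window maps to scale 0 and the default maps to scale 3. The ilog2_u32() helper below is a stand-in for the kernel's ilog2().

/* Hedged sketch: the iWARP receive-window scale math worked for 256 KB. */
#include <stdio.h>
#include <stdint.h>

#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;	/* floor(log2(v)) for v > 0 */
}

int main(void)
{
	uint32_t rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;
	/* scale 0 corresponds to the minimal window: ilog2(0xffff) == 15 */
	uint8_t scale = ilog2_u32(rcv_wnd_size) -
			ilog2_u32(QED_IWARP_RCV_WND_SIZE_MIN);

	/* 256 KB: ilog2 = 18, so scale = 3 and the stored window is 32 KB */
	printf("scale=%u wnd=%u\n", scale, rcv_wnd_size >> scale);
	return 0;
}
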
@ -95,6 +95,7 @@ struct qed_iwarp_info {
|
|||
spinlock_t iw_lock; /* for iwarp resources */
|
||||
spinlock_t qp_lock; /* for teardown races */
|
||||
u32 rcv_wnd_scale;
|
||||
u16 rcv_wnd_size;
|
||||
u16 max_mtu;
|
||||
u8 mac_addr[ETH_ALEN];
|
||||
u8 crc_needed;
|
||||
|
@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
|||
struct qed_rdma_start_in_params *params);
|
||||
|
||||
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
|
||||
struct iwarp_init_func_params *p_ramrod);
|
||||
struct iwarp_init_func_ramrod_data *p_ramrod);
|
||||
|
||||
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
|
||||
|
||||
|
|
|
@ -1969,33 +1969,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
|
|||
_qed_get_vport_stats(cdev, cdev->reset_stats);
|
||||
}
|
||||
|
||||
static void
|
||||
qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
||||
static enum gft_profile_type
|
||||
qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
|
||||
{
|
||||
if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
|
||||
return GFT_PROFILE_TYPE_4_TUPLE;
|
||||
if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
|
||||
return GFT_PROFILE_TYPE_IP_DST_PORT;
|
||||
return GFT_PROFILE_TYPE_L4_DST_PORT;
|
||||
}
|
||||
|
||||
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
struct qed_arfs_config_params *p_cfg_params)
|
||||
{
|
||||
if (p_cfg_params->arfs_enable) {
|
||||
qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
|
||||
p_cfg_params->tcp, p_cfg_params->udp,
|
||||
p_cfg_params->ipv4, p_cfg_params->ipv6);
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SP,
|
||||
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
|
||||
if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
|
||||
qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
|
||||
p_cfg_params->tcp,
|
||||
p_cfg_params->udp,
|
||||
p_cfg_params->ipv4,
|
||||
p_cfg_params->ipv6,
|
||||
qed_arfs_mode_to_hsi(p_cfg_params->mode));
|
||||
DP_VERBOSE(p_hwfn,
|
||||
QED_MSG_SP,
|
||||
"Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
|
||||
p_cfg_params->tcp ? "Enable" : "Disable",
|
||||
p_cfg_params->udp ? "Enable" : "Disable",
|
||||
p_cfg_params->ipv4 ? "Enable" : "Disable",
|
||||
p_cfg_params->ipv6 ? "Enable" : "Disable");
|
||||
p_cfg_params->ipv6 ? "Enable" : "Disable",
|
||||
(u32)p_cfg_params->mode);
|
||||
} else {
|
||||
qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
|
||||
qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
|
||||
}
|
||||
}
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
|
||||
p_cfg_params->arfs_enable ? "Enable" : "Disable");
|
||||
}
|
||||
|
||||
static int
|
||||
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
||||
int
|
||||
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
|
||||
struct qed_spq_comp_cb *p_cb,
|
||||
dma_addr_t p_addr, u16 length, u16 qid,
|
||||
u8 vport_id, bool b_is_add)
|
||||
struct qed_ntuple_filter_params *p_params)
|
||||
{
|
||||
struct rx_update_gft_filter_data *p_ramrod = NULL;
|
||||
struct qed_spq_entry *p_ent = NULL;
|
||||
|
@ -2004,13 +2016,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
|||
u8 abs_vport_id = 0;
|
||||
int rc = -EINVAL;
|
||||
|
||||
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
|
||||
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
|
||||
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
|
||||
rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Get SPQ entry */
|
||||
memset(&init_data, 0, sizeof(init_data));
|
||||
|
@ -2032,17 +2046,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
|||
return rc;
|
||||
|
||||
p_ramrod = &p_ent->ramrod.rx_update_gft;
|
||||
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
|
||||
p_ramrod->pkt_hdr_length = cpu_to_le16(length);
|
||||
p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
|
||||
p_ramrod->vport_id = abs_vport_id;
|
||||
p_ramrod->filter_type = RFS_FILTER_TYPE;
|
||||
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
|
||||
|
||||
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
|
||||
p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
|
||||
|
||||
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
|
||||
p_ramrod->rx_qid_valid = 1;
|
||||
p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
|
||||
}
|
||||
|
||||
p_ramrod->flow_id_valid = 0;
|
||||
p_ramrod->flow_id = 0;
|
||||
|
||||
p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
|
||||
p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
|
||||
: GFT_DELETE_FILTER;
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SP,
|
||||
"V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
|
||||
abs_vport_id, abs_rx_q_id,
|
||||
b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
|
||||
p_params->b_is_add ? "Adding" : "Removing",
|
||||
(u64)p_params->addr, p_params->length);
|
||||
|
||||
return qed_spq_post(p_hwfn, p_ent, NULL);
|
||||
}
|
||||
|
@ -2743,7 +2767,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
|
|||
}
|
||||
}
|
||||
|
||||
static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
|
||||
static int qed_configure_arfs_searcher(struct qed_dev *cdev,
|
||||
enum qed_filter_config_mode mode)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
|
||||
struct qed_arfs_config_params arfs_config_params;
|
||||
|
@ -2753,8 +2778,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
|
|||
arfs_config_params.udp = true;
|
||||
arfs_config_params.ipv4 = true;
|
||||
arfs_config_params.ipv6 = true;
|
||||
arfs_config_params.arfs_enable = en_searcher;
|
||||
|
||||
arfs_config_params.mode = mode;
|
||||
qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
|
||||
&arfs_config_params);
|
||||
return 0;
|
||||
|
@ -2762,8 +2786,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
|
|||
|
||||
static void
|
||||
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
|
||||
void *cookie, union event_ring_data *data,
|
||||
u8 fw_return_code)
|
||||
void *cookie,
|
||||
union event_ring_data *data, u8 fw_return_code)
|
||||
{
|
||||
struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
|
||||
void *dev = p_hwfn->cdev->ops_cookie;
|
||||
|
@ -2771,10 +2795,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
|
|||
op->arfs_filter_op(dev, cookie, fw_return_code);
|
||||
}
|
||||
|
||||
static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
|
||||
dma_addr_t mapping, u16 length,
|
||||
u16 vport_id, u16 rx_queue_id,
|
||||
bool add_filter)
|
||||
static int
|
||||
qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
|
||||
void *cookie,
|
||||
struct qed_ntuple_filter_params *params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
|
||||
struct qed_spq_comp_cb cb;
|
||||
|
@ -2783,9 +2807,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
|
|||
cb.function = qed_arfs_sp_response_handler;
|
||||
cb.cookie = cookie;
|
||||
|
||||
rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
|
||||
&cb, mapping, length, rx_queue_id,
|
||||
vport_id, add_filter);
|
||||
if (params->b_is_vf) {
|
||||
if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
|
||||
false)) {
|
||||
DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
|
||||
params->vf_id);
|
||||
return rc;
|
||||
}
|
||||
|
||||
params->vport_id = params->vf_id + 1;
|
||||
params->qid = QED_RFS_NTUPLE_QID_RSS;
|
||||
}
|
||||
|
||||
rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
|
||||
if (rc)
|
||||
DP_NOTICE(p_hwfn,
|
||||
"Failed to issue a-RFS filter configuration\n");
|
||||
|
|
|
@ -190,7 +190,7 @@ struct qed_arfs_config_params {
|
|||
bool udp;
|
||||
bool ipv4;
|
||||
bool ipv6;
|
||||
bool arfs_enable;
|
||||
enum qed_filter_config_mode mode;
|
||||
};
|
||||
|
||||
struct qed_sp_vport_update_params {
|
||||
|
@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
|
|||
|
||||
void qed_reset_vport_stats(struct qed_dev *cdev);
|
||||
|
||||
/**
|
||||
* *@brief qed_arfs_mode_configure -
|
||||
*
|
||||
**Enable or disable rfs mode. It must accept atleast one of tcp or udp true
|
||||
**and atleast one of ipv4 or ipv6 true to enable rfs mode.
|
||||
*
|
||||
**@param p_hwfn
|
||||
**@param p_ptt
|
||||
**@param p_cfg_params - arfs mode configuration parameters.
|
||||
*
|
||||
*/
|
||||
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
struct qed_arfs_config_params *p_cfg_params);
|
||||
|
||||
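A hedged usage sketch for the reworked qed_arfs_mode_configure(), mirroring the call made from qed_configure_arfs_searcher() in qed_l2.c above: fill a qed_arfs_config_params with at least one of tcp/udp and one of ipv4/ipv6 set, pick a qed_filter_config_mode, and pass the PF's hwfn and aRFS PTT. The example_enable_arfs() wrapper and its arguments are assumptions for illustration only.

/* Hedged sketch: enabling 5-tuple GFT filtering via the new interface.
 * p_hwfn and p_arfs_ptt are assumed to come from the usual qed setup path.
 */
static void example_enable_arfs(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_arfs_ptt)
{
	struct qed_arfs_config_params params;

	memset(&params, 0, sizeof(params));
	params.tcp = true;	/* at least one of tcp/udp ... */
	params.udp = true;
	params.ipv4 = true;	/* ... and at least one of ipv4/ipv6 */
	params.ipv6 = true;
	params.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;

	qed_arfs_mode_configure(p_hwfn, p_arfs_ptt, &params);
}
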
/**
|
||||
* @brief - qed_configure_rfs_ntuple_filter
|
||||
*
|
||||
* This ramrod should be used to add or remove arfs hw filter
|
||||
*
|
||||
* @params p_hwfn
|
||||
* @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
|
||||
* it with cookie and callback function address, if not
|
||||
* using this mode then client must pass NULL.
|
||||
* @params p_params
|
||||
*/
|
||||
int
|
||||
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
|
||||
struct qed_spq_comp_cb *p_cb,
|
||||
struct qed_ntuple_filter_params *p_params);
|
||||
|
||||
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
|
||||
#define QED_QUEUE_CID_SELF (0xff)
|
||||
|
||||
|
|
|
@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
|
|||
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
|
||||
data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
|
||||
data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
|
||||
data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
|
||||
|
||||
data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
|
||||
}
|
||||
|
||||
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
|
||||
|
@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
|
|||
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
|
||||
|
||||
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
|
||||
p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
|
||||
p_ramrod->inner_vlan_stripping_en =
|
||||
p_ll2_conn->input.rx_vlan_removal_en;
|
||||
p_ramrod->queue_id = p_ll2_conn->queue_id;
|
||||
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
|
||||
|
||||
|
@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
|
|||
|
||||
memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
|
||||
|
||||
p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
|
||||
CORE_TX_DEST_NW : CORE_TX_DEST_LB;
|
||||
switch (data->input.tx_dest) {
|
||||
case QED_LL2_TX_DEST_NW:
|
||||
p_ll2_info->tx_dest = CORE_TX_DEST_NW;
|
||||
break;
|
||||
case QED_LL2_TX_DEST_LB:
|
||||
p_ll2_info->tx_dest = CORE_TX_DEST_LB;
|
||||
break;
|
||||
case QED_LL2_TX_DEST_DROP:
|
||||
p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (data->input.conn_type == QED_LL2_TYPE_OOO ||
|
||||
data->input.secondary_queue)
|
||||
p_ll2_info->main_func_queue = false;
|
||||
|
@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
|
|||
goto release_terminate;
|
||||
}
|
||||
|
||||
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
|
||||
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
|
||||
if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
|
||||
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
|
||||
rc = qed_ll2_start_ooo(cdev, params);
|
||||
if (rc) {
|
||||
|
@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
|
|||
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
|
||||
eth_zero_addr(cdev->ll2_mac_address);
|
||||
|
||||
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
|
||||
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
|
||||
if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
|
||||
qed_ll2_stop_ooo(cdev);
|
||||
|
||||
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
|
||||
|
|
|
@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
-					 DRV_MB_PARAM_NVM_LEN_SHIFT),
+					 DRV_MB_PARAM_NVM_LEN_OFFSET),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

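The mailbox parameter built above packs the NVM offset into the low bits and the transfer length into the upper bits of a single u32. A small sketch of that packing follows; the value 24 used for DRV_MB_PARAM_NVM_LEN_OFFSET is an assumption for illustration, since the real value comes from the management-FW HSI.

/* Hedged sketch of the NVM-read mailbox parameter packing. Only the packing
 * expression itself mirrors the code above; the shift value is assumed.
 */
#include <stdint.h>

#define DRV_MB_PARAM_NVM_LEN_OFFSET 24	/* assumed for this example */

static inline uint32_t nvm_read_param(uint32_t addr, uint32_t offset,
				      uint32_t bytes_to_copy)
{
	return addr + offset + (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_OFFSET);
}
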
@ -553,7 +553,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 
	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		qed_iwarp_init_fw_ramrod(p_hwfn,
-					 &p_ent->ramrod.iwarp_init_func.iwarp);
+					 &p_ent->ramrod.iwarp_init_func);
		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
	} else {
		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;

@ -124,6 +124,8 @@
|
|||
0x1f0434UL
|
||||
#define PRS_REG_SEARCH_TAG1 \
|
||||
0x1f0444UL
|
||||
#define PRS_REG_SEARCH_TENANT_ID \
|
||||
0x1f044cUL
|
||||
#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
|
||||
0x1f0a0cUL
|
||||
#define PRS_REG_SEARCH_TCP_FIRST_FRAG \
|
||||
|
@ -200,6 +202,12 @@
|
|||
0x2e8800UL
|
||||
#define CCFC_REG_STRONG_ENABLE_VF \
|
||||
0x2e070cUL
|
||||
#define CDU_REG_CCFC_CTX_VALID0 \
|
||||
0x580400UL
|
||||
#define CDU_REG_CCFC_CTX_VALID1 \
|
||||
0x580404UL
|
||||
#define CDU_REG_TCFC_CTX_VALID0 \
|
||||
0x580408UL
|
||||
#define CDU_REG_CID_ADDR_PARAMS \
|
||||
0x580900UL
|
||||
#define DBG_REG_CLIENT_ENABLE \
|
||||
|
@ -564,7 +572,7 @@
|
|||
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
|
||||
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
|
||||
#define PRS_REG_VXLAN_PORT 0x1f0738UL
|
||||
#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
|
||||
#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
|
||||
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
|
||||
|
||||
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
|
||||
|
@ -583,8 +591,8 @@
|
|||
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
|
||||
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
|
||||
#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
|
||||
#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
|
||||
#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
|
||||
#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
|
||||
#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
|
||||
|
||||
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
|
||||
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
|
||||
|
@ -595,15 +603,15 @@
|
|||
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
|
||||
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
|
||||
|
||||
#define PGLCS_REG_DBG_SELECT_K2 \
|
||||
#define PGLCS_REG_DBG_SELECT_K2_E5 \
|
||||
0x001d14UL
|
||||
#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
|
||||
#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
|
||||
0x001d18UL
|
||||
#define PGLCS_REG_DBG_SHIFT_K2 \
|
||||
#define PGLCS_REG_DBG_SHIFT_K2_E5 \
|
||||
0x001d1cUL
|
||||
#define PGLCS_REG_DBG_FORCE_VALID_K2 \
|
||||
#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
|
||||
0x001d20UL
|
||||
#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
|
||||
#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
|
||||
0x001d24UL
|
||||
#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
|
||||
0x008070UL
|
||||
|
@ -615,7 +623,7 @@
|
|||
0x009050UL
|
||||
#define MISCS_REG_RESET_PL_HV \
|
||||
0x009060UL
|
||||
#define MISCS_REG_RESET_PL_HV_2_K2 \
|
||||
#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
|
||||
0x009150UL
|
||||
#define DMAE_REG_DBG_SELECT \
|
||||
0x00c510UL
|
||||
|
@ -647,15 +655,15 @@
|
|||
0x0500b0UL
|
||||
#define GRC_REG_DBG_FORCE_FRAME \
|
||||
0x0500b4UL
|
||||
#define UMAC_REG_DBG_SELECT_K2 \
|
||||
#define UMAC_REG_DBG_SELECT_K2_E5 \
|
||||
0x051094UL
|
||||
#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
|
||||
#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
|
||||
0x051098UL
|
||||
#define UMAC_REG_DBG_SHIFT_K2 \
|
||||
#define UMAC_REG_DBG_SHIFT_K2_E5 \
|
||||
0x05109cUL
|
||||
#define UMAC_REG_DBG_FORCE_VALID_K2 \
|
||||
#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
|
||||
0x0510a0UL
|
||||
#define UMAC_REG_DBG_FORCE_FRAME_K2 \
|
||||
#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
|
||||
0x0510a4UL
|
||||
#define MCP2_REG_DBG_SELECT \
|
||||
0x052400UL
|
||||
|
@ -717,15 +725,15 @@
|
|||
0x1f0ba0UL
|
||||
#define PRS_REG_DBG_FORCE_FRAME \
|
||||
0x1f0ba4UL
|
||||
#define CNIG_REG_DBG_SELECT_K2 \
|
||||
#define CNIG_REG_DBG_SELECT_K2_E5 \
|
||||
0x218254UL
|
||||
#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
|
||||
#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
|
||||
0x218258UL
|
||||
#define CNIG_REG_DBG_SHIFT_K2 \
|
||||
#define CNIG_REG_DBG_SHIFT_K2_E5 \
|
||||
0x21825cUL
|
||||
#define CNIG_REG_DBG_FORCE_VALID_K2 \
|
||||
#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
|
||||
0x218260UL
|
||||
#define CNIG_REG_DBG_FORCE_FRAME_K2 \
|
||||
#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
|
||||
0x218264UL
|
||||
#define PRM_REG_DBG_SELECT \
|
||||
0x2306a8UL
|
||||
|
@ -997,35 +1005,35 @@
|
|||
0x580710UL
|
||||
#define CDU_REG_DBG_FORCE_FRAME \
|
||||
0x580714UL
|
||||
#define WOL_REG_DBG_SELECT_K2 \
|
||||
#define WOL_REG_DBG_SELECT_K2_E5 \
|
||||
0x600140UL
|
||||
#define WOL_REG_DBG_DWORD_ENABLE_K2 \
|
||||
#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
|
||||
0x600144UL
|
||||
#define WOL_REG_DBG_SHIFT_K2 \
|
||||
#define WOL_REG_DBG_SHIFT_K2_E5 \
|
||||
0x600148UL
|
||||
#define WOL_REG_DBG_FORCE_VALID_K2 \
|
||||
#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
|
||||
0x60014cUL
|
||||
#define WOL_REG_DBG_FORCE_FRAME_K2 \
|
||||
#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
|
||||
0x600150UL
|
||||
#define BMBN_REG_DBG_SELECT_K2 \
|
||||
#define BMBN_REG_DBG_SELECT_K2_E5 \
|
||||
0x610140UL
|
||||
#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
|
||||
#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
|
||||
0x610144UL
|
||||
#define BMBN_REG_DBG_SHIFT_K2 \
|
||||
#define BMBN_REG_DBG_SHIFT_K2_E5 \
|
||||
0x610148UL
|
||||
#define BMBN_REG_DBG_FORCE_VALID_K2 \
|
||||
#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
|
||||
0x61014cUL
|
||||
#define BMBN_REG_DBG_FORCE_FRAME_K2 \
|
||||
#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
|
||||
0x610150UL
|
||||
#define NWM_REG_DBG_SELECT_K2 \
|
||||
#define NWM_REG_DBG_SELECT_K2_E5 \
|
||||
0x8000ecUL
|
||||
#define NWM_REG_DBG_DWORD_ENABLE_K2 \
|
||||
#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
|
||||
0x8000f0UL
|
||||
#define NWM_REG_DBG_SHIFT_K2 \
|
||||
#define NWM_REG_DBG_SHIFT_K2_E5 \
|
||||
0x8000f4UL
|
||||
#define NWM_REG_DBG_FORCE_VALID_K2 \
|
||||
#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
|
||||
0x8000f8UL
|
||||
#define NWM_REG_DBG_FORCE_FRAME_K2\
|
||||
#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
|
||||
0x8000fcUL
|
||||
#define PBF_REG_DBG_SELECT \
|
||||
0xd80060UL
|
||||
|
@ -1247,36 +1255,76 @@
|
|||
0x1901534UL
|
||||
#define USEM_REG_DBG_FORCE_FRAME \
|
||||
0x1901538UL
|
||||
#define NWS_REG_DBG_SELECT_K2 \
|
||||
#define NWS_REG_DBG_SELECT_K2_E5 \
|
||||
0x700128UL
|
||||
#define NWS_REG_DBG_DWORD_ENABLE_K2 \
|
||||
#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
|
||||
0x70012cUL
|
||||
#define NWS_REG_DBG_SHIFT_K2 \
|
||||
#define NWS_REG_DBG_SHIFT_K2_E5 \
|
||||
0x700130UL
|
||||
#define NWS_REG_DBG_FORCE_VALID_K2 \
|
||||
#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
|
||||
0x700134UL
|
||||
#define NWS_REG_DBG_FORCE_FRAME_K2 \
|
||||
#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
|
||||
0x700138UL
|
||||
#define MS_REG_DBG_SELECT_K2 \
|
||||
#define MS_REG_DBG_SELECT_K2_E5 \
|
||||
0x6a0228UL
|
||||
#define MS_REG_DBG_DWORD_ENABLE_K2 \
|
||||
#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
|
||||
0x6a022cUL
|
||||
#define MS_REG_DBG_SHIFT_K2 \
|
||||
#define MS_REG_DBG_SHIFT_K2_E5 \
|
||||
0x6a0230UL
|
||||
#define MS_REG_DBG_FORCE_VALID_K2 \
|
||||
#define MS_REG_DBG_FORCE_VALID_K2_E5 \
|
||||
0x6a0234UL
|
||||
#define MS_REG_DBG_FORCE_FRAME_K2 \
|
||||
#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
|
||||
0x6a0238UL
|
||||
#define PCIE_REG_DBG_COMMON_SELECT_K2 \
|
||||
#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
|
||||
0x054398UL
|
||||
#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
|
||||
#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
|
||||
0x05439cUL
|
||||
#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
|
||||
#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
|
||||
0x0543a0UL
|
||||
#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
|
||||
#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
|
||||
0x0543a4UL
|
||||
#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
|
||||
#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
|
||||
0x0543a8UL
|
||||
#define PTLD_REG_DBG_SELECT_E5 \
|
||||
0x5a1600UL
|
||||
#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
|
||||
0x5a1604UL
|
||||
#define PTLD_REG_DBG_SHIFT_E5 \
|
||||
0x5a1608UL
|
||||
#define PTLD_REG_DBG_FORCE_VALID_E5 \
|
||||
0x5a160cUL
|
||||
#define PTLD_REG_DBG_FORCE_FRAME_E5 \
|
||||
0x5a1610UL
|
||||
#define YPLD_REG_DBG_SELECT_E5 \
|
||||
0x5c1600UL
|
||||
#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
|
||||
0x5c1604UL
|
||||
#define YPLD_REG_DBG_SHIFT_E5 \
|
||||
0x5c1608UL
|
||||
#define YPLD_REG_DBG_FORCE_VALID_E5 \
|
||||
0x5c160cUL
|
||||
#define YPLD_REG_DBG_FORCE_FRAME_E5 \
|
||||
0x5c1610UL
|
||||
#define RGSRC_REG_DBG_SELECT_E5 \
|
||||
0x320040UL
|
||||
#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
|
||||
0x320044UL
|
||||
#define RGSRC_REG_DBG_SHIFT_E5 \
|
||||
0x320048UL
|
||||
#define RGSRC_REG_DBG_FORCE_VALID_E5 \
|
||||
0x32004cUL
|
||||
#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
|
||||
0x320050UL
|
||||
#define TGSRC_REG_DBG_SELECT_E5 \
|
||||
0x322040UL
|
||||
#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
|
||||
0x322044UL
|
||||
#define TGSRC_REG_DBG_SHIFT_E5 \
|
||||
0x322048UL
|
||||
#define TGSRC_REG_DBG_FORCE_VALID_E5 \
|
||||
0x32204cUL
|
||||
#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
|
||||
0x322050UL
|
||||
#define MISC_REG_RESET_PL_UA \
|
||||
0x008050UL
|
||||
#define MISC_REG_RESET_PL_HV \
|
||||
|
@ -1415,7 +1463,7 @@
|
|||
0x1940000UL
|
||||
#define SEM_FAST_REG_INT_RAM \
|
||||
0x020000UL
|
||||
#define SEM_FAST_REG_INT_RAM_SIZE \
|
||||
#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
|
||||
20480
|
||||
#define GRC_REG_TRACE_FIFO_VALID_DATA \
|
||||
0x050064UL
|
||||
|
@ -1433,6 +1481,8 @@
|
|||
0x340800UL
|
||||
#define BRB_REG_BIG_RAM_DATA \
|
||||
0x341500UL
|
||||
#define BRB_REG_BIG_RAM_DATA_SIZE \
|
||||
64
|
||||
#define SEM_FAST_REG_STALL_0_BB_K2 \
|
||||
0x000488UL
|
||||
#define SEM_FAST_REG_STALLED \
|
||||
|
@ -1451,7 +1501,7 @@
|
|||
0x238c30UL
|
||||
#define MISCS_REG_BLOCK_256B_EN \
|
||||
0x009074UL
|
||||
#define MCP_REG_SCRATCH_SIZE \
|
||||
#define MCP_REG_SCRATCH_SIZE_BB_K2 \
|
||||
57344
|
||||
#define MCP_REG_CPU_REG_FILE \
|
||||
0xe05200UL
|
||||
|
@ -1485,35 +1535,35 @@
|
|||
0x008c14UL
|
||||
#define NWS_REG_NWS_CMU_K2 \
|
||||
0x720000UL
|
||||
#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
|
||||
#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
|
||||
0x000680UL
|
||||
#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
|
||||
#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
|
||||
0x000684UL
|
||||
#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
|
||||
#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
|
||||
0x0006c0UL
|
||||
#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
|
||||
#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
|
||||
0x0006c4UL
|
||||
#define MS_REG_MS_CMU_K2 \
|
||||
#define MS_REG_MS_CMU_K2_E5 \
|
||||
0x6a4000UL
|
||||
#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
|
||||
#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
|
||||
0x000208UL
|
||||
#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
|
||||
#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
|
||||
0x00020cUL
|
||||
#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
|
||||
#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
|
||||
0x000210UL
|
||||
#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
|
||||
#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
|
||||
0x000214UL
|
||||
#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
|
||||
#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
|
||||
0x000208UL
|
||||
#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
|
||||
#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
|
||||
0x00020cUL
|
||||
#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
|
||||
#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
|
||||
0x000210UL
|
||||
#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
|
||||
#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
|
||||
0x000214UL
|
||||
#define PHY_PCIE_REG_PHY0_K2 \
|
||||
#define PHY_PCIE_REG_PHY0_K2_E5 \
|
||||
0x620000UL
|
||||
#define PHY_PCIE_REG_PHY1_K2 \
|
||||
#define PHY_PCIE_REG_PHY1_K2_E5 \
|
||||
0x624000UL
|
||||
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
|
||||
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
|
||||
|
|
|
@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}
-	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+	p_ramrod->outer_tag_config.outer_tag.tci =
+		cpu_to_le16(p_hwfn->hw_info.ovlan);
 
	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-		   sb, sb_index, p_ramrod->outer_tag);
+		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
 
	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 

@ -215,7 +215,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
|
|||
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
|
||||
struct qed_spq *p_spq)
|
||||
{
|
||||
struct core_conn_context *p_cxt;
|
||||
struct e4_core_conn_context *p_cxt;
|
||||
struct qed_cxt_info cxt_info;
|
||||
u16 physical_q;
|
||||
int rc;
|
||||
|
@ -233,11 +233,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
|
|||
p_cxt = cxt_info.p_cxt;
|
||||
|
||||
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
|
||||
XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
|
||||
E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
|
||||
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
|
||||
XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
|
||||
E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
|
||||
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
|
||||
XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
|
||||
E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
|
||||
|
||||
/* QM physical queue */
|
||||
physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
|
||||
|
|
|
@@ -153,7 +153,7 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
int rel_vf_id,
bool b_enabled_only, bool b_non_malicious)
{
@@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->cdev->chip_num;
pfdev_info->db_size = 0;
pfdev_info->indices_per_sb = PIS_PER_SB;
pfdev_info->indices_per_sb = PIS_PER_SB_E4;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3582,11 +3582,11 @@ static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
int i, cnt;
/* Read initial consumers & producers */
for (i = 0; i < MAX_NUM_VOQS; i++) {
for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
u32 prod;
cons[i] = qed_rd(p_hwfn, p_ptt,
@@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
/* Wait for consumers to pass the producers */
i = 0;
for (cnt = 0; cnt < 50; cnt++) {
for (; i < MAX_NUM_VOQS; i++) {
for (; i < MAX_NUM_VOQS_E4; i++) {
u32 tmp;
tmp = qed_rd(p_hwfn, p_ptt,
@@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
break;
}
if (i == MAX_NUM_VOQS)
if (i == MAX_NUM_VOQS_E4)
break;
msleep(20);
@@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
int rc;
@@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
p_link->speed);
}
static int
@@ -273,6 +273,23 @@ enum qed_iov_wq_flag {
};
#ifdef CONFIG_QED_SRIOV
/**
 * @brief Check if given VF ID @vfid is valid
 * w.r.t. @b_enabled_only value
 * if b_enabled_only = true - only enabled VF id is valid
 * else any VF id less than max_vfs is valid
 *
 * @param p_hwfn
 * @param rel_vf_id - Relative VF ID
 * @param b_enabled_only - consider only enabled VF
 * @param b_non_malicious - true iff we want to validate vf isn't malicious.
 *
 * @return bool - true for valid VF ID
 */
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
int rel_vf_id,
bool b_enabled_only, bool b_non_malicious);
/**
 * @brief - Given a VF index, return index of next [including that] active VF.
 *
@@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
return false;
}
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
@@ -52,9 +52,9 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 10
#define QEDE_ENGINEERING_VERSION 21
#define QEDE_MINOR_VERSION 33
#define QEDE_REVISION_VERSION 0
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
@@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
u16 rxq_id, bool add_fltr)
{
const struct qed_eth_ops *op = edev->ops;
struct qed_ntuple_filter_params params;
if (n->used)
return;
memset(&params, 0, sizeof(params));
params.addr = n->mapping;
params.length = n->buf_len;
params.qid = rxq_id;
params.b_is_add = add_fltr;
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
"%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
add_fltr ? "Adding" : "Deleting",
@@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
n->used = true;
n->filter_op = add_fltr;
op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
rxq_id, add_fltr);
op->ntuple_filter_config(edev->cdev, n, &params);
}
static void
@@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
edev->arfs->filter_count++;
if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
edev->ops->configure_arfs_searcher(edev->cdev, true);
enum qed_filter_config_mode mode;
mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
edev->ops->configure_arfs_searcher(edev->cdev, mode);
edev->arfs->enable = true;
}
@@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
edev->arfs->filter_count--;
if (!edev->arfs->filter_count && edev->arfs->enable) {
enum qed_filter_config_mode mode;
mode = QED_FILTER_CONFIG_MODE_DISABLE;
edev->arfs->enable = false;
edev->ops->configure_arfs_searcher(edev->cdev, false);
edev->ops->configure_arfs_searcher(edev->cdev, mode);
}
}
@@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
if (!edev->arfs->filter_count) {
if (edev->arfs->enable) {
enum qed_filter_config_mode mode;
mode = QED_FILTER_CONFIG_MODE_DISABLE;
edev->arfs->enable = false;
edev->ops->configure_arfs_searcher(edev->cdev, false);
edev->ops->configure_arfs_searcher(edev->cdev, mode);
}
#ifdef CONFIG_RFS_ACCEL
} else {
@@ -1147,7 +1147,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
struct qed_sb_info *sb_info, u16 sb_id)
{
struct status_block *sb_virt;
struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -25,15 +25,17 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
u32 task_retry_id,
u8 fcp_cmd_payload[32])
{
struct fcoe_task_context *ctx = task_params->context;
struct e4_fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
u32 io_size, val;
bool slow_sgl;
memset(ctx, 0, sizeof(*(ctx)));
ctx->ystorm_ag_context.byte0 = val_byte;
slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
sgl_task_params->small_mid_sge);
io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
@@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
y_st_ctx = &ctx->ystorm_st_context;
y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
y_st_ctx->task_type = task_params->task_type;
y_st_ctx->task_type = (u8)task_params->task_type;
memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
/* Tstorm ctx */
t_st_ctx = &ctx->tstorm_st_context;
t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
FCOE_TASK_DEV_TYPE_TAPE :
FCOE_TASK_DEV_TYPE_DISK);
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
val = cpu_to_le32(task_params->cq_rss_number);
t_st_ctx->read_only.glbl_q_num = val;
t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
t_st_ctx->read_only.task_type = task_params->task_type;
t_st_ctx->read_only.task_type = (u8)task_params->task_type;
SET_FIELD(t_st_ctx->read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
SET_FIELD(m_st_ctx->flags,
MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
(slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
m_st_ctx->sgl_params.sgl_num_sges =
cpu_to_le16(sgl_task_params->num_sges);
} else {
/* Tstorm ctx */
SET_FIELD(t_st_ctx->read_write.flags,
@@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
sgl_task_params);
}
/* Init Sqe */
init_common_sqe(task_params, SEND_FCOE_CMD);
return 0;
}
@@ -112,14 +118,16 @@ int init_initiator_midpath_unsolicited_fcoe_task(
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header)
{
struct fcoe_task_context *ctx = task_params->context;
struct e4_fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
u32 val;
memset(ctx, 0, sizeof(*(ctx)));
ctx->ystorm_ag_context.byte0 = val_byte;
/* Init Ystorm */
y_st_ctx = &ctx->ystorm_st_context;
@@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
SET_FIELD(y_st_ctx->sgl_mode,
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
y_st_ctx->task_type = task_params->task_type;
y_st_ctx->task_type = (u8)task_params->task_type;
memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
@@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
val = cpu_to_le32(task_params->cq_rss_number);
t_st_ctx->read_only.glbl_q_num = val;
t_st_ctx->read_only.task_type = task_params->task_type;
t_st_ctx->read_only.task_type = (u8)task_params->task_type;
SET_FIELD(t_st_ctx->read_write.flags,
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
}
int init_initiator_sequence_recovery_fcoe_task(
struct fcoe_task_params *task_params, u32 off)
struct fcoe_task_params *task_params, u32 desired_offset)
{
init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
task_params->sqe->additional_info_union.seq_rec_updated_offset =
desired_offset;
return 0;
}
@@ -13,7 +13,7 @@
struct fcoe_task_params {
/* Output parameter [set/filled by the HSI function] */
struct fcoe_task_context *context;
struct e4_fcoe_task_context *context;
/* Output parameter [set/filled by the HSI function] */
struct fcoe_wqe *sqe;
@@ -129,7 +129,7 @@ struct qedf_ioreq {
struct delayed_work timeout_work;
struct completion tm_done;
struct completion abts_done;
struct fcoe_task_context *task;
struct e4_fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
@@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
@@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
struct fcoe_task_context *task;
struct e4_fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
@@ -225,19 +225,6 @@ enum fcoe_cqe_type {
MAX_FCOE_CQE_TYPE
};
/*
 * FCoE device type
 */
enum fcoe_device_type {
FCOE_TASK_DEV_TYPE_DISK,
FCOE_TASK_DEV_TYPE_TAPE,
MAX_FCOE_DEVICE_TYPE
};
/*
 * FCoE fast path error codes
 */
@@ -332,31 +319,6 @@ enum fcoe_sp_error_code {
MAX_FCOE_SP_ERROR_CODE
};
/*
 * FCoE SQE request type
 */
enum fcoe_sqe_request_type {
SEND_FCOE_CMD,
SEND_FCOE_MIDPATH,
SEND_FCOE_ABTS_REQUEST,
FCOE_EXCHANGE_CLEANUP,
FCOE_SEQUENCE_RECOVERY,
SEND_FCOE_XFER_RDY,
SEND_FCOE_RSP,
SEND_FCOE_RSP_WITH_SENSE_DATA,
SEND_FCOE_TARGET_DATA,
SEND_FCOE_INITIATOR_DATA,
/*
 * Xfer Continuation (==1) ready to be sent. Previous XFERs data
 * received successfully.
 */
SEND_FCOE_XFER_CONTINUATION_RDY,
SEND_FCOE_TARGET_ABTS_RSP,
MAX_FCOE_SQE_REQUEST_TYPE
};
/*
 * FCoE task TX state
 */
@@ -389,34 +351,4 @@ enum fcoe_task_tx_state {
MAX_FCOE_TASK_TX_STATE
};
/*
 * FCoE task type
 */
enum fcoe_task_type {
FCOE_TASK_TYPE_WRITE_INITIATOR,
FCOE_TASK_TYPE_READ_INITIATOR,
FCOE_TASK_TYPE_MIDPATH,
FCOE_TASK_TYPE_UNSOLICITED,
FCOE_TASK_TYPE_ABTS,
FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
FCOE_TASK_TYPE_WRITE_TARGET,
FCOE_TASK_TYPE_READ_TARGET,
FCOE_TASK_TYPE_RSP,
FCOE_TASK_TYPE_RSP_SENSE_DATA,
FCOE_TASK_TYPE_ABTS_TARGET,
FCOE_TASK_TYPE_ENUM_SIZE,
MAX_FCOE_TASK_TYPE
};
struct scsi_glbl_queue_entry {
/* Start physical address for the RQ (receive queue) PBL. */
struct regpair rq_pbl_addr;
/* Start physical address for the CQ (completion queue) PBL. */
struct regpair cq_pbl_addr;
/* Start physical address for the CMDQ (command queue) PBL. */
struct regpair cmdq_pbl_addr;
};
#endif /* __QEDF_HSI__ */
@@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
@@ -597,7 +597,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -673,7 +673,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
@@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
@@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
struct fcoe_task_context *task_ctx;
struct e4_fcoe_task_context *task_ctx;
u16 xid;
enum fcoe_task_type req_type = 0;
struct fcoe_wqe *sqe;
@@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *io_req)
{
u16 xid, rval;
struct fcoe_task_context *task_ctx;
struct e4_fcoe_task_context *task_ctx;
struct scsi_cmnd *sc_cmd;
struct fcoe_cqe_rsp_info *fcp_rsp;
struct qedf_rport *fcport;
@@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
struct qedf_rport *fcport;
struct qedf_ctx *qedf;
uint16_t xid;
struct fcoe_task_context *task;
struct e4_fcoe_task_context *task;
int tmo = 0;
int rc = SUCCESS;
unsigned long flags;
@@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
struct fcoe_task_context *task;
struct e4_fcoe_task_context *task;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
@@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
struct qedf_io_work *io_work;
u32 bdq_idx;
void *bdq_addr;
struct scsi_bd *p_bd_info;
p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
"address.hi=%x address.lo=%x opaque_data.hi=%x "
"opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
"address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
le32_to_cpu(p_bd_info->address.hi),
le32_to_cpu(p_bd_info->address.lo),
le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
qedf->bdq_prod_idx, pktlen);
bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
if (bdq_idx >= QEDF_BDQ_SIZE) {
QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
bdq_idx);
@@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
struct status_block *sb = sb_info->sb_virt;
struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
struct status_block *sb = sb_info->sb_virt;
struct status_block_e4 *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
@@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
struct status_block *sb_virt;
struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
sizeof(struct status_block), &sb_phys, GFP_KERNEL);
sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
@@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
for (i = 0; i < QEDF_BDQ_SIZE; i++) {
pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
pbl->opaque.hi = 0;
pbl->opaque.fcoe_opaque.hi = 0;
/* Opaque lo data is an index into the BDQ array */
pbl->opaque.lo = cpu_to_le32(i);
pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
pbl++;
}
@@ -7,9 +7,9 @@
 * this source tree.
 */
#define QEDF_VERSION "8.20.5.0"
#define QEDF_VERSION "8.33.0.20"
#define QEDF_DRIVER_MAJOR_VER 8
#define QEDF_DRIVER_MINOR_VER 20
#define QEDF_DRIVER_REV_VER 5
#define QEDF_DRIVER_ENG_VER 0
#define QEDF_DRIVER_MINOR_VER 33
#define QEDF_DRIVER_REV_VER 0
#define QEDF_DRIVER_ENG_VER 20
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
{
struct qedi_fastpath *fp = NULL;
struct qed_sb_info *sb_info = NULL;
struct status_block *sb = NULL;
struct status_block_e4 *sb = NULL;
struct global_queue *que = NULL;
int id;
u16 prod_idx;
@@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
sb_info = fp->sb_info;
sb = sb_info->sb_virt;
prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
STATUS_BLOCK_PROD_INDEX_MASK);
STATUS_BLOCK_E4_PROD_INDEX_MASK);
seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
que = qedi->global_queues[fp->sb_id];
seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
|
|||
{
|
||||
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
|
||||
struct iscsi_session *session = conn->session;
|
||||
struct iscsi_task_context *task_ctx;
|
||||
struct e4_iscsi_task_context *task_ctx;
|
||||
struct iscsi_text_rsp *resp_hdr_ptr;
|
||||
struct iscsi_text_response_hdr *cqe_text_response;
|
||||
struct qedi_cmd *cmd;
|
||||
|
@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
|
|||
{
|
||||
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
|
||||
struct iscsi_session *session = conn->session;
|
||||
struct iscsi_task_context *task_ctx;
|
||||
struct e4_iscsi_task_context *task_ctx;
|
||||
struct iscsi_login_rsp *resp_hdr_ptr;
|
||||
struct iscsi_login_response_hdr *cqe_login_response;
|
||||
struct qedi_cmd *cmd;
|
||||
|
@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
|
|||
(qedi->bdq_prod_idx % qedi->rq_num_entries));
|
||||
|
||||
/* Obtain buffer address from rqe_opaque */
|
||||
idx = cqe->rqe_opaque.lo;
|
||||
idx = cqe->rqe_opaque;
|
||||
if (idx > (QEDI_BDQ_NUM - 1)) {
|
||||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
|
||||
|
@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
|
|||
}
|
||||
|
||||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
|
||||
cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
|
||||
"rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
|
||||
|
||||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
|
||||
|
@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
|
|||
struct scsi_bd *pbl;
|
||||
|
||||
/* Obtain buffer address from rqe_opaque */
|
||||
idx = cqe->rqe_opaque.lo;
|
||||
idx = cqe->rqe_opaque;
|
||||
if (idx > (QEDI_BDQ_NUM - 1)) {
|
||||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
|
||||
|
@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
|
|||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
|
||||
pbl, pbl->address.hi, pbl->address.lo, idx);
|
||||
pbl->opaque.hi = 0;
|
||||
pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
|
||||
pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
|
||||
pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
|
||||
pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
|
||||
pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
|
||||
|
||||
/* Increment producer to let f/w know we've handled the frame */
|
||||
qedi->bdq_prod_idx += count;
|
||||
|
@ -1017,7 +1018,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
|
|||
struct scsi_sgl_task_params tx_sgl_task_params;
|
||||
struct scsi_sgl_task_params rx_sgl_task_params;
|
||||
struct iscsi_task_params task_params;
|
||||
struct iscsi_task_context *fw_task_ctx;
|
||||
struct e4_iscsi_task_context *fw_task_ctx;
|
||||
struct qedi_ctx *qedi = qedi_conn->qedi;
|
||||
struct iscsi_login_req *login_hdr;
|
||||
struct scsi_sge *resp_sge = NULL;
|
||||
|
@ -1037,8 +1038,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
|
|||
return -ENOMEM;
|
||||
|
||||
fw_task_ctx =
|
||||
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
|
||||
(struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
|
||||
tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
|
||||
|
||||
qedi_cmd->task_id = tid;
|
||||
|
||||
|
@ -1119,7 +1121,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
|
|||
struct scsi_sgl_task_params tx_sgl_task_params;
|
||||
struct scsi_sgl_task_params rx_sgl_task_params;
|
||||
struct iscsi_task_params task_params;
|
||||
struct iscsi_task_context *fw_task_ctx;
|
||||
struct e4_iscsi_task_context *fw_task_ctx;
|
||||
struct iscsi_logout *logout_hdr = NULL;
|
||||
struct qedi_ctx *qedi = qedi_conn->qedi;
|
||||
struct qedi_cmd *qedi_cmd;
|
||||
|
@ -1137,8 +1139,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
|
|||
return -ENOMEM;
|
||||
|
||||
fw_task_ctx =
|
||||
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
|
||||
(struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
|
||||
tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
|
||||
|
||||
qedi_cmd->task_id = tid;
|
||||
|
||||
|
@ -1467,7 +1470,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
|
|||
struct iscsi_tmf_request_hdr tmf_pdu_header;
|
||||
struct iscsi_task_params task_params;
|
||||
struct qedi_ctx *qedi = qedi_conn->qedi;
|
||||
struct iscsi_task_context *fw_task_ctx;
|
||||
struct e4_iscsi_task_context *fw_task_ctx;
|
||||
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
|
||||
struct iscsi_task *ctask;
|
||||
struct iscsi_tm *tmf_hdr;
|
||||
|
@ -1490,8 +1493,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
|
|||
return -ENOMEM;
|
||||
|
||||
fw_task_ctx =
|
||||
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
|
||||
(struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
|
||||
tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
|
||||
|
||||
qedi_cmd->task_id = tid;
|
||||
|
||||
|
@ -1605,7 +1609,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
|
|||
struct scsi_sgl_task_params tx_sgl_task_params;
|
||||
struct scsi_sgl_task_params rx_sgl_task_params;
|
||||
struct iscsi_task_params task_params;
|
||||
struct iscsi_task_context *fw_task_ctx;
|
||||
struct e4_iscsi_task_context *fw_task_ctx;
|
||||
struct qedi_ctx *qedi = qedi_conn->qedi;
|
||||
struct iscsi_text *text_hdr;
|
||||
struct scsi_sge *req_sge = NULL;
|
||||
|
@ -1627,8 +1631,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
|
|||
return -ENOMEM;
|
||||
|
||||
fw_task_ctx =
|
||||
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
|
||||
(struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
|
||||
tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
|
||||
|
||||
qedi_cmd->task_id = tid;
|
||||
|
||||
|
@ -1705,7 +1710,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
|
|||
struct scsi_sgl_task_params rx_sgl_task_params;
|
||||
struct iscsi_task_params task_params;
|
||||
struct qedi_ctx *qedi = qedi_conn->qedi;
|
||||
struct iscsi_task_context *fw_task_ctx;
|
||||
struct e4_iscsi_task_context *fw_task_ctx;
|
||||
struct iscsi_nopout *nopout_hdr;
|
||||
struct scsi_sge *resp_sge = NULL;
|
||||
struct qedi_cmd *qedi_cmd;
|
||||
|
@ -1725,8 +1730,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
|
|||
return -ENOMEM;
|
||||
|
||||
fw_task_ctx =
|
||||
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
|
||||
(struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
|
||||
tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
|
||||
|
||||
qedi_cmd->task_id = tid;
|
||||
|
||||
|
@ -2046,7 +2052,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
|
|||
struct iscsi_task_params task_params;
|
||||
struct iscsi_conn_params conn_params;
|
||||
struct scsi_initiator_cmd_params cmd_params;
|
||||
struct iscsi_task_context *fw_task_ctx;
|
||||
struct e4_iscsi_task_context *fw_task_ctx;
|
||||
struct iscsi_cls_conn *cls_conn;
|
||||
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
|
||||
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
|
||||
|
@ -2069,8 +2075,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
|
|||
return -ENOMEM;
|
||||
|
||||
fw_task_ctx =
|
||||
(struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
|
||||
(struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
|
||||
tid);
|
||||
memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
|
||||
|
||||
cmd->task_id = tid;
|
||||
|
||||
|
|
|
@ -203,12 +203,15 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
|
|||
struct data_hdr *pdu_header,
|
||||
enum iscsi_task_type task_type)
|
||||
{
|
||||
struct iscsi_task_context *context;
|
||||
u16 index;
|
||||
struct e4_iscsi_task_context *context;
|
||||
u32 val;
|
||||
u16 index;
|
||||
u8 val_byte;
|
||||
|
||||
context = task_params->context;
|
||||
val_byte = context->mstorm_ag_context.cdu_validation;
|
||||
memset(context, 0, sizeof(*context));
|
||||
context->mstorm_ag_context.cdu_validation = val_byte;
|
||||
|
||||
for (index = 0; index <
|
||||
ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
|
||||
|
@ -222,7 +225,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
|
|||
cpu_to_le16(task_params->conn_icid);
|
||||
|
||||
SET_FIELD(context->ustorm_ag_context.flags1,
|
||||
USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
|
||||
E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
|
||||
|
||||
context->ustorm_st_context.task_type = task_type;
|
||||
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
|
||||
|
@ -252,9 +255,8 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
|
|||
|
||||
static
|
||||
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
|
||||
struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
|
||||
u32 remaining_recv_len,
|
||||
u32 expected_data_transfer_len,
|
||||
struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
|
||||
u32 remaining_recv_len, u32 expected_data_transfer_len,
|
||||
u8 num_sges, bool tx_dif_conn_err_en)
|
||||
{
|
||||
u32 val;
|
||||
|
@ -265,12 +267,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
|
|||
ustorm_st_cxt->exp_data_transfer_len = val;
|
||||
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
|
||||
SET_FIELD(ustorm_ag_cxt->flags2,
|
||||
USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
|
||||
E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
|
||||
tx_dif_conn_err_en ? 1 : 0);
|
||||
}
|
||||
|
||||
static
|
||||
void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
|
||||
void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
|
||||
struct iscsi_conn_params *conn_params,
|
||||
enum iscsi_task_type task_type,
|
||||
u32 task_size,
|
||||
|
@ -342,56 +344,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
|
|||
cpu_to_le16(dif_task_params->application_tag_mask);
|
||||
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
|
||||
dif_task_params->crc_seed ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
|
||||
SET_FIELD(rdif_context->flags0,
|
||||
RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
|
||||
dif_task_params->host_guard_type);
|
||||
SET_FIELD(rdif_context->flags0,
|
||||
RDIF_TASK_CONTEXT_PROTECTIONTYPE,
|
||||
RDIF_TASK_CONTEXT_PROTECTION_TYPE,
|
||||
dif_task_params->protection_type);
|
||||
SET_FIELD(rdif_context->flags0,
|
||||
RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
|
||||
RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
|
||||
SET_FIELD(rdif_context->flags0,
|
||||
RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
|
||||
RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
|
||||
dif_task_params->keep_ref_tag_const ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
|
||||
RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
|
||||
(dif_task_params->validate_app_tag &&
|
||||
dif_task_params->dif_on_network) ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_VALIDATEGUARD,
|
||||
RDIF_TASK_CONTEXT_VALIDATE_GUARD,
|
||||
(dif_task_params->validate_guard &&
|
||||
dif_task_params->dif_on_network) ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_VALIDATEREFTAG,
|
||||
RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
|
||||
(dif_task_params->validate_ref_tag &&
|
||||
dif_task_params->dif_on_network) ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_HOSTINTERFACE,
|
||||
RDIF_TASK_CONTEXT_HOST_INTERFACE,
|
||||
dif_task_params->dif_on_host ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_NETWORKINTERFACE,
|
||||
RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
|
||||
dif_task_params->dif_on_network ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_FORWARDGUARD,
|
||||
RDIF_TASK_CONTEXT_FORWARD_GUARD,
|
||||
dif_task_params->forward_guard ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_FORWARDAPPTAG,
|
||||
RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
|
||||
dif_task_params->forward_app_tag ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_FORWARDREFTAG,
|
||||
RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
|
||||
dif_task_params->forward_ref_tag ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
|
||||
RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
|
||||
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
|
||||
RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
|
||||
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
|
||||
SET_FIELD(rdif_context->flags1,
|
||||
RDIF_TASK_CONTEXT_INTERVALSIZE,
|
||||
RDIF_TASK_CONTEXT_INTERVAL_SIZE,
|
||||
dif_task_params->dif_block_size_log - 9);
|
||||
SET_FIELD(rdif_context->state,
|
||||
RDIF_TASK_CONTEXT_REFTAGMASK,
|
||||
RDIF_TASK_CONTEXT_REF_TAG_MASK,
|
||||
dif_task_params->ref_tag_mask);
|
||||
SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
|
||||
SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
|
||||
dif_task_params->ignore_app_tag);
|
||||
}
|
||||
|
||||
|
@ -399,7 +402,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
|
|||
task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
|
||||
tdif_context->app_tag_value =
|
||||
cpu_to_le16(dif_task_params->application_tag);
|
||||
tdif_context->partial_crc_valueB =
|
||||
tdif_context->partial_crc_value_b =
|
||||
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
|
||||
tdif_context->partial_crc_value_a =
|
||||
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
|
||||
|
@ -407,64 +410,68 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
|
|||
dif_task_params->crc_seed ? 1 : 0);
|
||||
|
||||
SET_FIELD(tdif_context->flags0,
|
||||
TDIF_TASK_CONTEXT_SETERRORWITHEOP,
|
||||
TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
|
||||
dif_task_params->tx_dif_conn_err_en ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
|
||||
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
|
||||
dif_task_params->forward_guard ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
|
||||
dif_task_params->forward_app_tag ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
|
||||
dif_task_params->forward_ref_tag ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
|
||||
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
|
||||
dif_task_params->dif_block_size_log - 9);
|
||||
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_HOST_INTERFACE,
|
||||
dif_task_params->dif_on_host ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_NETWORKINTERFACE,
|
||||
TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
|
||||
dif_task_params->dif_on_network ? 1 : 0);
|
||||
val = cpu_to_le32(dif_task_params->initial_ref_tag);
|
||||
tdif_context->initial_ref_tag = val;
|
||||
tdif_context->app_tag_mask =
|
||||
cpu_to_le16(dif_task_params->application_tag_mask);
|
||||
SET_FIELD(tdif_context->flags0,
|
||||
TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
|
||||
TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
|
||||
dif_task_params->host_guard_type);
|
||||
SET_FIELD(tdif_context->flags0,
|
||||
TDIF_TASK_CONTEXT_PROTECTIONTYPE,
|
||||
TDIF_TASK_CONTEXT_PROTECTION_TYPE,
|
||||
dif_task_params->protection_type);
|
||||
SET_FIELD(tdif_context->flags0,
|
||||
TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
|
||||
TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
|
||||
dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags0,
|
||||
TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
|
||||
TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
|
||||
dif_task_params->keep_ref_tag_const ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_VALIDATE_GUARD,
|
||||
(dif_task_params->validate_guard &&
|
||||
dif_task_params->dif_on_host) ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
|
||||
TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
|
||||
(dif_task_params->validate_app_tag &&
|
||||
dif_task_params->dif_on_host) ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_VALIDATEREFTAG,
|
||||
TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
|
||||
(dif_task_params->validate_ref_tag &&
|
||||
dif_task_params->dif_on_host) ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
|
||||
TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
|
||||
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
|
||||
TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
|
||||
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
|
||||
SET_FIELD(tdif_context->flags1,
|
||||
TDIF_TASK_CONTEXT_REFTAGMASK,
|
||||
TDIF_TASK_CONTEXT_REF_TAG_MASK,
|
||||
dif_task_params->ref_tag_mask);
|
||||
SET_FIELD(tdif_context->flags0,
|
||||
TDIF_TASK_CONTEXT_IGNOREAPPTAG,
|
||||
TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
|
||||
dif_task_params->ignore_app_tag ? 1 : 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void set_local_completion_context(struct iscsi_task_context *context)
|
||||
static void set_local_completion_context(struct e4_iscsi_task_context *context)
|
||||
{
|
||||
SET_FIELD(context->ystorm_st_context.state.flags,
|
||||
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
|
||||
|
@ -481,7 +488,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
|
|||
struct scsi_dif_task_params *dif_task_params)
|
||||
{
|
||||
u32 exp_data_transfer_len = conn_params->max_burst_length;
|
||||
struct iscsi_task_context *cxt;
|
||||
struct e4_iscsi_task_context *cxt;
|
||||
bool slow_io = false;
|
||||
u32 task_size, val;
|
||||
u8 num_sges = 0;
|
||||
|
@ -494,8 +501,19 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
|
|||
|
||||
cxt = task_params->context;
|
||||
|
||||
|
||||
if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
|
||||
set_local_completion_context(cxt);
|
||||
} else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
|
||||
val = cpu_to_le32(task_size +
|
||||
((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
|
||||
cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
|
||||
cxt->mstorm_st_context.expected_itt =
|
||||
cpu_to_le32(pdu_header->itt);
|
||||
} else {
|
||||
val = cpu_to_le32(task_size);
|
||||
cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
|
||||
cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
|
||||
val;
|
||||
init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
|
||||
cmd_params);
|
||||
val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
|
||||
|
@ -503,10 +521,13 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
|
|||
|
||||
val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
|
||||
cxt->mstorm_st_context.sense_db.hi = val;
|
||||
}
|
||||
|
||||
if (task_params->tx_io_size) {
|
||||
init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
|
||||
dif_task_params);
|
||||
init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
|
||||
dif_task_params);
|
||||
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
|
||||
&cxt->ystorm_st_context.state.data_desc,
|
||||
sgl_task_params);
|
||||
|
@ -595,7 +616,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
|
|||
struct scsi_sgl_task_params *tx_params,
|
||||
struct scsi_sgl_task_params *rx_params)
|
||||
{
|
||||
struct iscsi_task_context *cxt;
|
||||
struct e4_iscsi_task_context *cxt;
|
||||
|
||||
cxt = task_params->context;
|
||||
|
||||
|
@ -637,7 +658,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
|
|||
struct scsi_sgl_task_params *tx_sgl_task_params,
|
||||
struct scsi_sgl_task_params *rx_sgl_task_params)
|
||||
{
|
||||
struct iscsi_task_context *cxt;
|
||||
struct e4_iscsi_task_context *cxt;
|
||||
|
||||
cxt = task_params->context;
|
||||
|
||||
|
@ -683,7 +704,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
|
|||
struct scsi_sgl_task_params *tx_params,
|
||||
struct scsi_sgl_task_params *rx_params)
|
||||
{
|
||||
struct iscsi_task_context *cxt;
|
||||
struct e4_iscsi_task_context *cxt;
|
||||
|
||||
cxt = task_params->context;
|
||||
|
||||
|
@ -738,7 +759,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
|
|||
struct scsi_sgl_task_params *tx_params,
|
||||
struct scsi_sgl_task_params *rx_params)
|
||||
{
|
||||
struct iscsi_task_context *cxt;
|
||||
struct e4_iscsi_task_context *cxt;
|
||||
|
||||
cxt = task_params->context;
|
||||
|
||||
|
|
|
@@ -13,7 +13,7 @@
#include "qedi_fw_scsi.h"
struct iscsi_task_params {
struct iscsi_task_context *context;
struct e4_iscsi_task_context *context;
struct iscsi_wqe *sqe;
u32 tx_io_size;
u32 rx_io_size;
@@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
struct async_data *data);
struct iscsi_eqe_data *data);
void qedi_start_conn_recovery(struct qedi_ctx *qedi,
struct qedi_conn *qedi_conn);
struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
void qedi_process_tcp_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data);
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
@@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
conn_info->dup_ack_theshold = 3;
conn_info->rcv_wnd = 65535;
conn_info->cwnd = DEF_MAX_CWND;
conn_info->ss_thresh = 65535;
conn_info->srtt = 300;
@@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
(qedi_ep->ip_type == TCP_IPV6),
1, (qedi_ep->vlan_id != 0));
conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
conn_info->rcv_wnd_scale = 4;
conn_info->ts_ticks_per_second = 1000;
conn_info->da_timeout_value = 200;
conn_info->ack_frequency = 2;
@@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
return msg;
}
void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
struct qedi_ctx *qedi;
@@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}
void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
void qedi_process_tcp_error(struct qedi_endpoint *ep,
struct iscsi_eqe_data *data)
{
struct qedi_conn *qedi_conn;
|
@ -182,7 +182,7 @@ struct qedi_cmd {
|
|||
struct scsi_cmnd *scsi_cmd;
|
||||
struct scatterlist *sg;
|
||||
struct qedi_io_bdt io_tbl;
|
||||
struct iscsi_task_context request;
|
||||
struct e4_iscsi_task_context request;
|
||||
unsigned char *sense_buffer;
|
||||
dma_addr_t sense_buffer_dma;
|
||||
u16 task_id;
|
||||
|
|
|
@@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
struct qedi_ctx *qedi;
struct qedi_endpoint *qedi_ep;
struct async_data *data;
struct iscsi_eqe_data *data;
int rval = 0;
if (!context || !fw_handle) {
@@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
data = (struct async_data *)fw_handle;
data = (struct iscsi_eqe_data *)fw_handle;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
data->cid, data->itid, data->error_code,
data->fw_debug_param);
"icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
data->icid, data->conn_id, data->error_code,
data->error_pdu_opcode_reserved);
qedi_ep = qedi->ep_tbl[data->cid];
qedi_ep = qedi->ep_tbl[data->icid];
if (!qedi_ep) {
QEDI_WARN(&qedi->dbg_ctx,
"Cannot process event, ep already disconnected, cid=0x%x\n",
data->cid);
data->icid);
WARN_ON(1);
return -ENODEV;
}
@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
struct status_block *sb_virt;
struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
sizeof(struct status_block), &sb_phys,
sizeof(struct status_block_e4), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
@@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
err_alloc_mem:
return rval;
@@ -961,7 +960,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
struct status_block *sb = sb_info->sb_virt;
struct status_block_e4 *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
@@ -1015,7 +1014,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
struct status_block *sb = sb_info->sb_virt;
struct status_block_e4 *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
@@ -1262,8 +1261,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
pbl, pbl->address.hi, pbl->address.lo, i);
pbl->opaque.hi = 0;
pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
pbl++;
}
@@ -7,8 +7,8 @@
 * this source tree.
 */
#define QEDI_MODULE_VERSION "8.10.4.0"
#define QEDI_MODULE_VERSION "8.33.0.20"
#define QEDI_DRIVER_MAJOR_VER 8
#define QEDI_DRIVER_MINOR_VER 10
#define QEDI_DRIVER_REV_VER 4
#define QEDI_DRIVER_ENG_VER 0
#define QEDI_DRIVER_MINOR_VER 33
#define QEDI_DRIVER_REV_VER 0
#define QEDI_DRIVER_ENG_VER 20
@ -32,6 +32,7 @@
|
|||
|
||||
#ifndef _COMMON_HSI_H
|
||||
#define _COMMON_HSI_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <linux/bitops.h>
|
||||
|
@ -48,13 +49,19 @@
|
|||
} while (0)
|
||||
|
||||
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
|
||||
#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
|
||||
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
|
||||
#define HILO_64(hi, lo) \
|
||||
HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
|
||||
#define HILO_64_REGPAIR(regpair) ({ \
|
||||
typeof(regpair) __regpair = (regpair); \
|
||||
HILO_64(__regpair.hi, __regpair.lo); })
|
||||
#define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair))
|
||||
|
||||
#ifndef __COMMON_HSI__
|
||||
#define __COMMON_HSI__
|
||||
|
||||
/********************************/
|
||||
/* PROTOCOL COMMON FW CONSTANTS */
|
||||
/********************************/
|
||||
|
||||
#define X_FINAL_CLEANUP_AGG_INT 1
|
||||
|
||||
|
@ -102,8 +109,8 @@
|
|||
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
|
||||
|
||||
#define FW_MAJOR_VERSION 8
|
||||
#define FW_MINOR_VERSION 20
|
||||
#define FW_REVISION_VERSION 0
|
||||
#define FW_MINOR_VERSION 33
|
||||
#define FW_REVISION_VERSION 1
|
||||
#define FW_ENGINEERING_VERSION 0
|
||||
|
||||
/***********************/
|
||||
|
@ -141,30 +148,15 @@
|
|||
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
|
||||
#define NUM_PHYS_TCS_4PORT_K2 (4)
|
||||
#define NUM_OF_PHYS_TCS (8)
|
||||
|
||||
#define PURE_LB_TC NUM_OF_PHYS_TCS
|
||||
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
|
||||
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
|
||||
|
||||
#define LB_TC (NUM_OF_PHYS_TCS)
|
||||
|
||||
/* Num of possible traffic priority values */
|
||||
#define NUM_OF_PRIO (8)
|
||||
|
||||
#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
|
||||
#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
|
||||
#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
|
||||
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
|
||||
|
||||
/* CIDs */
|
||||
#define NUM_OF_CONNECTION_TYPES (8)
|
||||
#define NUM_OF_CONNECTION_TYPES_E4 (8)
|
||||
#define NUM_OF_LCIDS (320)
|
||||
#define NUM_OF_LTIDS (320)
|
||||
|
||||
/* Clock values */
|
||||
#define MASTER_CLK_FREQ_E4 (375e6)
|
||||
#define STORM_CLK_FREQ_E4 (1000e6)
|
||||
#define CLK25M_CLK_FREQ_E4 (25e6)
|
||||
|
||||
/* Global PXP windows (GTT) */
|
||||
#define NUM_OF_GTT 19
|
||||
#define GTT_DWORD_SIZE_BITS 10
|
||||
|
@ -201,7 +193,7 @@
|
|||
#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
|
||||
#define DQ_DEMS_ROCE_CQ_CONS 7
|
||||
|
||||
/* XCM agg val selection */
|
||||
/* XCM agg val selection (HW) */
|
||||
#define DQ_XCM_AGG_VAL_SEL_WORD2 0
|
||||
#define DQ_XCM_AGG_VAL_SEL_WORD3 1
|
||||
#define DQ_XCM_AGG_VAL_SEL_WORD4 2
|
||||
|
@ -211,7 +203,7 @@
|
|||
#define DQ_XCM_AGG_VAL_SEL_REG5 6
|
||||
#define DQ_XCM_AGG_VAL_SEL_REG6 7
|
||||
|
||||
/* XCM agg val selection */
|
||||
/* XCM agg val selection (FW) */
|
||||
#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
|
||||
#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
|
||||
#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
|
||||
|
@ -263,7 +255,7 @@
|
|||
#define DQ_TCM_ROCE_RQ_PROD_CMD \
|
||||
DQ_TCM_AGG_VAL_SEL_WORD0
|
||||
|
||||
/* XCM agg counter flag selection */
|
||||
/* XCM agg counter flag selection (HW) */
|
||||
#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
|
||||
#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
|
||||
#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
|
||||
|
@ -273,7 +265,7 @@
|
|||
#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
|
||||
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
|
||||
|
||||
/* XCM agg counter flag selection */
|
||||
/* XCM agg counter flag selection (FW) */
|
||||
#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
|
||||
#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
|
||||
#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
|
||||
|
@ -347,6 +339,7 @@
|
|||
#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
|
||||
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
|
||||
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
|
||||
|
||||
#define DQ_REGION_SHIFT (12)
|
||||
|
||||
/* DPM */
|
||||
|
@ -359,29 +352,30 @@
|
|||
/* QM CONSTANTS */
|
||||
/*****************/
|
||||
|
||||
/* number of TX queues in the QM */
|
||||
/* Number of TX queues in the QM */
|
||||
#define MAX_QM_TX_QUEUES_K2 512
|
||||
#define MAX_QM_TX_QUEUES_BB 448
|
||||
#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
|
||||
|
||||
/* number of Other queues in the QM */
|
||||
/* Number of Other queues in the QM */
|
||||
#define MAX_QM_OTHER_QUEUES_BB 64
|
||||
#define MAX_QM_OTHER_QUEUES_K2 128
|
||||
#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
|
||||
|
||||
/* number of queues in a PF queue group */
|
||||
/* Number of queues in a PF queue group */
|
||||
#define QM_PF_QUEUE_GROUP_SIZE 8
|
||||
|
||||
/* the size of a single queue element in bytes */
|
||||
/* The size of a single queue element in bytes */
|
||||
#define QM_PQ_ELEMENT_SIZE 4
|
||||
|
||||
/* base number of Tx PQs in the CM PQ representation.
|
||||
* should be used when storing PQ IDs in CM PQ registers and context
|
||||
/* Base number of Tx PQs in the CM PQ representation.
|
||||
* Should be used when storing PQ IDs in CM PQ registers and context.
|
||||
*/
|
||||
#define CM_TX_PQ_BASE 0x200
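/* Illustrative only (not from the patch): per the comment above, a FW-facing
 * CM PQ ID would be formed as cm_pq_id = CM_TX_PQ_BASE + pq_id, where pq_id
 * is a hypothetical zero-based QM Tx PQ index.
 */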
|
||||
|
||||
/* number of global Vport/QCN rate limiters */
|
||||
/* Number of global Vport/QCN rate limiters */
|
||||
#define MAX_QM_GLOBAL_RLS 256
|
||||
|
||||
/* QM registers data */
|
||||
#define QM_LINE_CRD_REG_WIDTH 16
|
||||
#define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1))
|
||||
|
@ -400,7 +394,7 @@
|
|||
#define CAU_FSM_ETH_TX 1
|
||||
|
||||
/* Number of Protocol Indices per Status Block */
|
||||
#define PIS_PER_SB 12
|
||||
#define PIS_PER_SB_E4 12
|
||||
|
||||
#define CAU_HC_STOPPED_STATE 3
|
||||
#define CAU_HC_DISABLE_STATE 4
|
||||
|
@ -432,8 +426,7 @@
|
|||
|
||||
#define IGU_CMD_INT_ACK_BASE 0x0400
|
||||
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
|
||||
MAX_TOT_SB_PER_PATH - \
|
||||
1)
|
||||
MAX_TOT_SB_PER_PATH - 1)
|
||||
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
|
||||
|
||||
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
|
||||
|
@ -447,8 +440,7 @@
|
|||
|
||||
#define IGU_CMD_PROD_UPD_BASE 0x0600
|
||||
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
|
||||
MAX_TOT_SB_PER_PATH - \
|
||||
1)
|
||||
MAX_TOT_SB_PER_PATH - 1)
|
||||
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
|
||||
|
||||
/*****************/
|
||||
|
@ -555,11 +547,6 @@
|
|||
/* VF BAR */
|
||||
#define PXP_VF_BAR0 0
|
||||
|
||||
#define PXP_VF_BAR0_START_GRC 0x3E00
|
||||
#define PXP_VF_BAR0_GRC_LENGTH 0x200
|
||||
#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
|
||||
PXP_VF_BAR0_GRC_LENGTH - 1)
|
||||
|
||||
#define PXP_VF_BAR0_START_IGU 0
|
||||
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
|
||||
#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
|
||||
|
@ -577,44 +564,42 @@
|
|||
|
||||
#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
|
||||
#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
|
||||
#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
|
||||
+ \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
|
||||
- 1)
|
||||
#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
|
||||
|
||||
#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
|
||||
#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
|
||||
+ \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
|
||||
- 1)
|
||||
#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
|
||||
|
||||
#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
|
||||
#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
|
||||
+ \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
|
||||
- 1)
|
||||
#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
|
||||
|
||||
#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
|
||||
#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
|
||||
+ \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
|
||||
- 1)
|
||||
#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
|
||||
|
||||
#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
|
||||
#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
|
||||
+ \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
|
||||
- 1)
|
||||
#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
|
||||
|
||||
#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
|
||||
#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
|
||||
+ \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
|
||||
- 1)
|
||||
#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \
|
||||
PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
|
||||
|
||||
#define PXP_VF_BAR0_START_GRC 0x3E00
|
||||
#define PXP_VF_BAR0_GRC_LENGTH 0x200
|
||||
#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \
|
||||
PXP_VF_BAR0_GRC_LENGTH - 1)
|
||||
|
||||
#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
|
||||
#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
|
||||
|
||||
#define PXP_VF_BAR0_START_IGU2 0x10000
|
||||
#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
|
||||
#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \
|
||||
PXP_VF_BAR0_IGU2_LENGTH - 1)
|
||||
|
||||
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
|
||||
|
||||
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
|
||||
|
@ -624,11 +609,15 @@
|
|||
#define PXP_NUM_ILT_RECORDS_BB 7600
|
||||
#define PXP_NUM_ILT_RECORDS_K2 11000
|
||||
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
|
||||
|
||||
/* Host Interface */
|
||||
#define PXP_QUEUES_ZONE_MAX_NUM 320
|
||||
|
||||
/*****************/
|
||||
/* PRM CONSTANTS */
|
||||
/*****************/
|
||||
#define PRM_DMA_PAD_BYTES_NUM 2
|
||||
|
||||
/*****************/
|
||||
/* SDMs CONSTANTS */
|
||||
/*****************/
|
||||
|
@ -656,7 +645,7 @@
|
|||
#define SDM_COMP_TYPE_INC_ORDER_CNT 9
|
||||
|
||||
/*****************/
|
||||
/* PBF Constants */
|
||||
/* PBF CONSTANTS */
|
||||
/*****************/
|
||||
|
||||
/* Number of PBF command queue lines. Each line is 32B. */
|
||||
|
@ -671,14 +660,7 @@
|
|||
|
||||
#define PRS_GFT_CAM_LINES_NO_MATCH 31
|
||||
|
||||
/* Async data KCQ CQE */
|
||||
struct async_data {
|
||||
__le32 cid;
|
||||
__le16 itid;
|
||||
u8 error_code;
|
||||
u8 fw_debug_param;
|
||||
};
|
||||
|
||||
/* Interrupt coalescing TimeSet */
|
||||
struct coalescing_timeset {
|
||||
u8 value;
|
||||
#define COALESCING_TIMESET_TIMESET_MASK 0x7F
|
||||
|
@ -692,23 +674,32 @@ struct common_queue_zone {
|
|||
__le16 reserved;
|
||||
};
|
||||
|
||||
/* ETH Rx producers data */
|
||||
struct eth_rx_prod_data {
|
||||
__le16 bd_prod;
|
||||
__le16 cqe_prod;
|
||||
};
|
||||
|
||||
struct regpair {
|
||||
__le32 lo;
|
||||
__le32 hi;
|
||||
struct tcp_ulp_connect_done_params {
|
||||
__le16 mss;
|
||||
u8 snd_wnd_scale;
|
||||
u8 flags;
|
||||
#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
|
||||
#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
|
||||
#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
|
||||
#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
|
||||
};
|
||||
|
||||
struct vf_pf_channel_eqe_data {
|
||||
struct regpair msg_addr;
|
||||
struct iscsi_connect_done_results {
|
||||
__le16 icid;
|
||||
__le16 conn_id;
|
||||
struct tcp_ulp_connect_done_params params;
|
||||
};
|
||||
|
||||
struct iscsi_eqe_data {
|
||||
__le32 cid;
|
||||
__le16 icid;
|
||||
__le16 conn_id;
|
||||
__le16 reserved;
|
||||
u8 error_code;
|
||||
u8 error_pdu_opcode_reserved;
|
||||
#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
|
||||
|
@ -719,52 +710,6 @@ struct iscsi_eqe_data {
|
|||
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
|
||||
};
|
||||
|
||||
struct rdma_eqe_destroy_qp {
|
||||
__le32 cid;
|
||||
u8 reserved[4];
|
||||
};
|
||||
|
||||
union rdma_eqe_data {
|
||||
struct regpair async_handle;
|
||||
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
|
||||
};
|
||||
|
||||
struct malicious_vf_eqe_data {
|
||||
u8 vf_id;
|
||||
u8 err_id;
|
||||
__le16 reserved[3];
|
||||
};
|
||||
|
||||
struct initial_cleanup_eqe_data {
|
||||
u8 vf_id;
|
||||
u8 reserved[7];
|
||||
};
|
||||
|
||||
/* Event Data Union */
|
||||
union event_ring_data {
|
||||
u8 bytes[8];
|
||||
struct vf_pf_channel_eqe_data vf_pf_channel;
|
||||
struct iscsi_eqe_data iscsi_info;
|
||||
union rdma_eqe_data rdma_data;
|
||||
struct malicious_vf_eqe_data malicious_vf;
|
||||
struct initial_cleanup_eqe_data vf_init_cleanup;
|
||||
};
|
||||
|
||||
/* Event Ring Entry */
|
||||
struct event_ring_entry {
|
||||
u8 protocol_id;
|
||||
u8 opcode;
|
||||
__le16 reserved0;
|
||||
__le16 echo;
|
||||
u8 fw_return_code;
|
||||
u8 flags;
|
||||
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
|
||||
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
|
||||
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
|
||||
#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
|
||||
union event_ring_data data;
|
||||
};
|
||||
|
||||
/* Multi function mode */
|
||||
enum mf_mode {
|
||||
ERROR_MODE /* Unsupported mode */,
|
||||
|
@ -781,13 +726,31 @@ enum protocol_type {
|
|||
PROTOCOLID_CORE,
|
||||
PROTOCOLID_ETH,
|
||||
PROTOCOLID_IWARP,
|
||||
PROTOCOLID_RESERVED5,
|
||||
PROTOCOLID_RESERVED0,
|
||||
PROTOCOLID_PREROCE,
|
||||
PROTOCOLID_COMMON,
|
||||
PROTOCOLID_RESERVED6,
|
||||
PROTOCOLID_RESERVED1,
|
||||
MAX_PROTOCOL_TYPE
|
||||
};
|
||||
|
||||
struct regpair {
|
||||
__le32 lo;
|
||||
__le32 hi;
|
||||
};
|
||||
|
||||
/* RoCE Destroy Event Data */
|
||||
struct rdma_eqe_destroy_qp {
|
||||
__le32 cid;
|
||||
u8 reserved[4];
|
||||
};
|
||||
|
||||
/* RDMA Event Data Union */
|
||||
union rdma_eqe_data {
|
||||
struct regpair async_handle;
|
||||
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
|
||||
};
|
||||
|
||||
/* Ustorm Queue Zone */
|
||||
struct ustorm_eth_queue_zone {
|
||||
struct coalescing_timeset int_coalescing_timeset;
|
||||
u8 reserved[3];
|
||||
|
@ -798,9 +761,9 @@ struct ustorm_queue_zone {
|
|||
struct common_queue_zone common;
|
||||
};
|
||||
|
||||
/* status block structure */
|
||||
/* Status block structure */
|
||||
struct cau_pi_entry {
|
||||
u32 prod;
|
||||
__le32 prod;
|
||||
#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
|
||||
#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
|
||||
#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
|
||||
|
@ -811,16 +774,16 @@ struct cau_pi_entry {
|
|||
#define CAU_PI_ENTRY_RESERVED_SHIFT 24
|
||||
};
|
||||
|
||||
/* status block structure */
|
||||
/* Status block structure */
|
||||
struct cau_sb_entry {
|
||||
u32 data;
|
||||
__le32 data;
|
||||
#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
|
||||
#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
|
||||
#define CAU_SB_ENTRY_STATE0_MASK 0xF
|
||||
#define CAU_SB_ENTRY_STATE0_SHIFT 24
|
||||
#define CAU_SB_ENTRY_STATE1_MASK 0xF
|
||||
#define CAU_SB_ENTRY_STATE1_SHIFT 28
|
||||
u32 params;
|
||||
__le32 params;
|
||||
#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
|
||||
#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
|
||||
#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
|
||||
|
@ -839,7 +802,16 @@ struct cau_sb_entry {
|
|||
#define CAU_SB_ENTRY_TPH_SHIFT 31
|
||||
};
|
||||
|
||||
/* core doorbell data */
|
||||
/* Igu cleanup bit values to distinguish between clean or producer consumer
|
||||
* update.
|
||||
*/
|
||||
enum command_type_bit {
|
||||
IGU_COMMAND_TYPE_NOP = 0,
|
||||
IGU_COMMAND_TYPE_SET = 1,
|
||||
MAX_COMMAND_TYPE_BIT
|
||||
};
|
||||
|
||||
/* Core doorbell data */
|
||||
struct core_db_data {
|
||||
u8 params;
|
||||
#define CORE_DB_DATA_DEST_MASK 0x3
|
||||
|
@ -946,7 +918,7 @@ struct db_pwm_addr {
|
|||
#define DB_PWM_ADDR_RESERVED1_SHIFT 28
|
||||
};
|
||||
|
||||
/* Parameters to RoCE firmware, passed in EDPM doorbell */
|
||||
/* Parameters to RDMA firmware, passed in EDPM doorbell */
|
||||
struct db_rdma_dpm_params {
|
||||
__le32 params;
|
||||
#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
|
||||
|
@ -969,7 +941,9 @@ struct db_rdma_dpm_params {
|
|||
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
|
||||
};
|
||||
|
||||
/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
|
||||
/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
|
||||
* DPM burst.
|
||||
*/
|
||||
struct db_rdma_dpm_data {
|
||||
__le16 icid;
|
||||
__le16 prod_val;
|
||||
|
@ -987,7 +961,7 @@ enum igu_int_cmd {
|
|||
|
||||
/* IGU producer or consumer update command */
|
||||
struct igu_prod_cons_update {
|
||||
u32 sb_id_and_flags;
|
||||
__le32 sb_id_and_flags;
|
||||
#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
|
||||
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
|
||||
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
|
||||
|
@ -1002,7 +976,7 @@ struct igu_prod_cons_update {
|
|||
#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
|
||||
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
|
||||
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
|
||||
u32 reserved1;
|
||||
__le32 reserved1;
|
||||
};
|
||||
|
||||
/* Igu segments access for default status block only */
|
||||
|
@ -1012,6 +986,30 @@ enum igu_seg_access {
|
|||
MAX_IGU_SEG_ACCESS
|
||||
};
|
||||
|
||||
/* Enumeration for L3 type field of parsing_and_err_flags.
|
||||
* L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6
|
||||
* (This field can be filled according to the last-ethertype)
|
||||
*/
|
||||
enum l3_type {
|
||||
e_l3_type_unknown,
|
||||
e_l3_type_ipv4,
|
||||
e_l3_type_ipv6,
|
||||
MAX_L3_TYPE
|
||||
};
|
||||
|
||||
/* Enumeration for l4Protocol field of parsing_and_err_flags.
|
||||
* L4-protocol: 0 - none, 1 - TCP, 2 - UDP.
|
||||
* If the packet is IPv4 fragment, and its not the first fragment, the
|
||||
* protocol-type should be set to none.
|
||||
*/
|
||||
enum l4_protocol {
|
||||
e_l4_protocol_none,
|
||||
e_l4_protocol_tcp,
|
||||
e_l4_protocol_udp,
|
||||
MAX_L4_PROTOCOL
|
||||
};
|
||||
|
||||
/* Parsing and error flags field */
|
||||
struct parsing_and_err_flags {
|
||||
__le16 flags;
|
||||
#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
|
||||
|
@ -1044,6 +1042,7 @@ struct parsing_and_err_flags {
|
|||
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
|
||||
};
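/* Hypothetical usage sketch (not part of the patch): decoding the L3 type
 * from a parsing_and_err_flags word with the MASK/SHIFT pairs above.
 * GET_FIELD is assumed to be the generic bitfield accessor provided by the
 * qed common HSI headers.
 */
static inline enum l3_type pars_flags_l3_type(struct parsing_and_err_flags *pf)
{
	u16 flags = le16_to_cpu(pf->flags);

	return (enum l3_type)GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L3TYPE);
}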
|
||||
|
||||
/* Parsing error flags bitmap */
|
||||
struct parsing_err_flags {
|
||||
__le16 flags;
|
||||
#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
|
||||
|
@ -1080,10 +1079,12 @@ struct parsing_err_flags {
|
|||
#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
|
||||
};
|
||||
|
||||
/* Pb context */
|
||||
struct pb_context {
|
||||
__le32 crc[4];
|
||||
};
|
||||
|
||||
/* Concrete Function ID */
|
||||
struct pxp_concrete_fid {
|
||||
__le16 fid;
|
||||
#define PXP_CONCRETE_FID_PFID_MASK 0xF
|
||||
|
@ -1098,6 +1099,7 @@ struct pxp_concrete_fid {
|
|||
#define PXP_CONCRETE_FID_VFID_SHIFT 8
|
||||
};
|
||||
|
||||
/* Concrete Function ID */
|
||||
struct pxp_pretend_concrete_fid {
|
||||
__le16 fid;
|
||||
#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF
|
||||
|
@ -1110,12 +1112,13 @@ struct pxp_pretend_concrete_fid {
|
|||
#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8
|
||||
};
|
||||
|
||||
/* Function ID */
|
||||
union pxp_pretend_fid {
|
||||
struct pxp_pretend_concrete_fid concrete_fid;
|
||||
__le16 opaque_fid;
|
||||
};
|
||||
|
||||
/* Pxp Pretend Command Register. */
|
||||
/* Pxp Pretend Command Register */
|
||||
struct pxp_pretend_cmd {
|
||||
union pxp_pretend_fid fid;
|
||||
__le16 control;
|
||||
|
@ -1139,7 +1142,7 @@ struct pxp_pretend_cmd {
|
|||
#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
|
||||
};
|
||||
|
||||
/* PTT Record in PXP Admin Window. */
|
||||
/* PTT Record in PXP Admin Window */
|
||||
struct pxp_ptt_entry {
|
||||
__le32 offset;
|
||||
#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF
|
||||
|
@ -1149,7 +1152,7 @@ struct pxp_ptt_entry {
|
|||
struct pxp_pretend_cmd pretend;
|
||||
};
|
||||
|
||||
/* VF Zone A Permission Register. */
|
||||
/* VF Zone A Permission Register */
|
||||
struct pxp_vf_zone_a_permission {
|
||||
__le32 control;
|
||||
#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
|
||||
|
@ -1162,184 +1165,174 @@ struct pxp_vf_zone_a_permission {
|
|||
#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
|
||||
};
|
||||
|
||||
/* RSS hash type */
|
||||
/* Rdif context */
|
||||
struct rdif_task_context {
|
||||
__le32 initial_ref_tag;
|
||||
__le16 app_tag_value;
|
||||
__le16 app_tag_mask;
|
||||
u8 flags0;
|
||||
#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
|
||||
#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
|
||||
#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
|
||||
#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
|
||||
#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
|
||||
#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
|
||||
#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
|
||||
#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
|
||||
#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
|
||||
#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
|
||||
#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
|
||||
#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
|
||||
#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
|
||||
#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
|
||||
#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
|
||||
u8 partial_dif_data[7];
|
||||
__le16 partial_crc_value;
|
||||
__le16 partial_checksum_value;
|
||||
__le32 offset_in_io;
|
||||
__le16 flags1;
|
||||
#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
|
||||
#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
|
||||
#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
|
||||
#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
|
||||
#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
|
||||
#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
|
||||
#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
|
||||
#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
|
||||
#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
|
||||
#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
|
||||
#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
|
||||
#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
|
||||
#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
|
||||
#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
|
||||
#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
|
||||
#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
|
||||
#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
|
||||
#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
|
||||
#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
|
||||
#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
|
||||
#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
|
||||
#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
|
||||
#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
|
||||
#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
|
||||
__le16 state;
|
||||
#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
|
||||
#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
|
||||
#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
|
||||
#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
|
||||
#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
|
||||
#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
|
||||
#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
|
||||
#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
|
||||
#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
|
||||
#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
|
||||
#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
|
||||
#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
|
||||
#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
|
||||
#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
|
||||
#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
|
||||
#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
|
||||
#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
|
||||
#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
|
||||
#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
|
||||
__le32 reserved2;
|
||||
};
|
||||
|
||||
/* RSS hash type */
|
||||
enum rss_hash_type {
|
||||
RSS_HASH_TYPE_DEFAULT = 0,
|
||||
RSS_HASH_TYPE_IPV4 = 1,
|
||||
RSS_HASH_TYPE_TCP_IPV4 = 2,
|
||||
RSS_HASH_TYPE_IPV6 = 3,
|
||||
RSS_HASH_TYPE_TCP_IPV6 = 4,
|
||||
RSS_HASH_TYPE_UDP_IPV4 = 5,
|
||||
RSS_HASH_TYPE_UDP_IPV6 = 6,
|
||||
MAX_RSS_HASH_TYPE
|
||||
};
|
||||
|
||||
/* status block structure */
struct status_block {
__le16 pi_array[PIS_PER_SB];
/* Status block structure */
struct status_block_e4 {
__le16 pi_array[PIS_PER_SB_E4];
__le32 sb_num;
#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
#define STATUS_BLOCK_SB_NUM_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
__le32 prod_index;
#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
};
|
||||
|
||||
/* Tdif context */
|
||||
struct tdif_task_context {
|
||||
__le32 initial_ref_tag;
|
||||
__le16 app_tag_value;
|
||||
__le16 app_tag_mask;
|
||||
__le16 partial_crc_valueB;
|
||||
__le16 partial_checksum_valueB;
|
||||
__le16 partial_crc_value_b;
|
||||
__le16 partial_checksum_value_b;
|
||||
__le16 stateB;
|
||||
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
|
||||
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
|
||||
#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
|
||||
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
|
||||
#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
|
||||
#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
|
||||
#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
|
||||
#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
|
||||
#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
|
||||
#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
|
||||
u8 reserved1;
|
||||
u8 flags0;
|
||||
#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
|
||||
#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
|
||||
#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
|
||||
#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
|
||||
#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
|
||||
#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
|
||||
#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
|
||||
#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
|
||||
#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
|
||||
#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
|
||||
#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
|
||||
#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
|
||||
#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
|
||||
#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
|
||||
__le32 flags1;
|
||||
#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
|
||||
#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
|
||||
#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
|
||||
#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
|
||||
#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
|
||||
#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
|
||||
#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
|
||||
#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
|
||||
#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
|
||||
#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
|
||||
#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
|
||||
#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
|
||||
#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
|
||||
#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
|
||||
#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
|
||||
#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
|
||||
#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
|
||||
#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
|
||||
#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
|
||||
#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
|
||||
#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
|
||||
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
|
||||
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
|
||||
#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
|
||||
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
|
||||
#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
|
||||
#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
|
||||
#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
|
||||
#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
|
||||
#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
|
||||
#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
|
||||
#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
|
||||
#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
|
||||
#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
|
||||
#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
|
||||
#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
|
||||
#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
|
||||
#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
|
||||
#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
|
||||
__le32 offset_in_iob;
|
||||
__le32 offset_in_io_b;
|
||||
__le16 partial_crc_value_a;
|
||||
__le16 partial_checksum_valuea_;
|
||||
__le32 offset_in_ioa;
|
||||
__le16 partial_checksum_value_a;
|
||||
__le32 offset_in_io_a;
|
||||
u8 partial_dif_data_a[8];
|
||||
u8 partial_dif_data_b[8];
|
||||
};
|
||||
|
||||
/* Timers context */
|
||||
struct timers_context {
|
||||
__le32 logical_client_0;
|
||||
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
|
||||
|
@ -1385,6 +1378,7 @@ struct timers_context {
|
|||
#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
|
||||
};
|
||||
|
||||
/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
|
||||
enum tunnel_next_protocol {
|
||||
e_unknown = 0,
|
||||
e_l2 = 1,
|
||||
|
|
|
@ -36,6 +36,7 @@
|
|||
/********************/
|
||||
/* ETH FW CONSTANTS */
|
||||
/********************/
|
||||
|
||||
#define ETH_HSI_VER_MAJOR 3
|
||||
#define ETH_HSI_VER_MINOR 10
|
||||
|
||||
|
@ -78,16 +79,16 @@
|
|||
#define ETH_RX_MAX_BUFF_PER_PKT 5
|
||||
#define ETH_RX_BD_THRESHOLD 12
|
||||
|
||||
/* num of MAC/VLAN filters */
|
||||
/* Num of MAC/VLAN filters */
|
||||
#define ETH_NUM_MAC_FILTERS 512
|
||||
#define ETH_NUM_VLAN_FILTERS 512
|
||||
|
||||
/* approx. multicast constants */
|
||||
/* Approx. multicast constants */
|
||||
#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
|
||||
#define ETH_MULTICAST_MAC_BINS 256
|
||||
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
|
||||
|
||||
/* ethernet vport update constants */
|
||||
/* Ethernet vport update constants */
|
||||
#define ETH_FILTER_RULES_COUNT 10
|
||||
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
|
||||
#define ETH_RSS_KEY_SIZE_REGS 10
|
||||
|
@ -103,6 +104,27 @@
|
|||
/* Control frame check constants */
|
||||
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
|
||||
|
||||
/* GFS constants */
|
||||
#define ETH_GFT_TRASH_CAN_VPORT 0x1FF
|
||||
|
||||
/* Destination port mode */
|
||||
enum dest_port_mode {
|
||||
DEST_PORT_PHY,
|
||||
DEST_PORT_LOOPBACK,
|
||||
DEST_PORT_PHY_LOOPBACK,
|
||||
DEST_PORT_DROP,
|
||||
MAX_DEST_PORT_MODE
|
||||
};
|
||||
|
||||
/* Ethernet address type */
|
||||
enum eth_addr_type {
|
||||
BROADCAST_ADDRESS,
|
||||
MULTICAST_ADDRESS,
|
||||
UNICAST_ADDRESS,
|
||||
UNKNOWN_ADDRESS,
|
||||
MAX_ETH_ADDR_TYPE
|
||||
};
|
||||
|
||||
struct eth_tx_1st_bd_flags {
|
||||
u8 bitfields;
|
||||
#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
|
||||
|
@ -123,7 +145,7 @@ struct eth_tx_1st_bd_flags {
|
|||
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
|
||||
};
|
||||
|
||||
/* The parsing information data for the first tx bd of a given packet. */
/* The parsing information data for the first tx bd of a given packet */
|
||||
struct eth_tx_data_1st_bd {
|
||||
__le16 vlan;
|
||||
u8 nbds;
|
||||
|
@ -137,7 +159,7 @@ struct eth_tx_data_1st_bd {
|
|||
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
|
||||
};
|
||||
|
||||
/* The parsing information data for the second tx bd of a given packet. */
|
||||
/* The parsing information data for the second tx bd of a given packet */
|
||||
struct eth_tx_data_2nd_bd {
|
||||
__le16 tunn_ip_size;
|
||||
__le16 bitfields1;
|
||||
|
@ -168,18 +190,14 @@ struct eth_tx_data_2nd_bd {
|
|||
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
|
||||
};
|
||||
|
||||
/* Firmware data for L2-EDPM packet. */
|
||||
/* Firmware data for L2-EDPM packet */
|
||||
struct eth_edpm_fw_data {
|
||||
struct eth_tx_data_1st_bd data_1st_bd;
|
||||
struct eth_tx_data_2nd_bd data_2nd_bd;
|
||||
__le32 reserved;
|
||||
};
|
||||
|
||||
struct eth_fast_path_cqe_fw_debug {
|
||||
__le16 reserved2;
|
||||
};
|
||||
|
||||
/* tunneling parsing flags */
|
||||
/* Tunneling parsing flags */
|
||||
struct eth_tunnel_parsing_flags {
|
||||
u8 flags;
|
||||
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
|
||||
|
@ -207,7 +225,7 @@ struct eth_pmd_flow_flags {
|
|||
#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
|
||||
};
|
||||
|
||||
/* Regular ETH Rx FP CQE. */
|
||||
/* Regular ETH Rx FP CQE */
|
||||
struct eth_fast_path_rx_reg_cqe {
|
||||
u8 type;
|
||||
u8 bitfields;
|
||||
|
@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe {
|
|||
u8 placement_offset;
|
||||
struct eth_tunnel_parsing_flags tunnel_pars_flags;
|
||||
u8 bd_num;
|
||||
u8 reserved[9];
|
||||
struct eth_fast_path_cqe_fw_debug fw_debug;
|
||||
u8 reserved1[3];
|
||||
u8 reserved;
|
||||
__le16 flow_id;
|
||||
u8 reserved1[11];
|
||||
struct eth_pmd_flow_flags pmd_flags;
|
||||
};
|
||||
|
||||
/* TPA-continue ETH Rx FP CQE. */
|
||||
/* TPA-continue ETH Rx FP CQE */
|
||||
struct eth_fast_path_rx_tpa_cont_cqe {
|
||||
u8 type;
|
||||
u8 tpa_agg_index;
|
||||
|
@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
|
|||
struct eth_pmd_flow_flags pmd_flags;
|
||||
};
|
||||
|
||||
/* TPA-end ETH Rx FP CQE. */
|
||||
/* TPA-end ETH Rx FP CQE */
|
||||
struct eth_fast_path_rx_tpa_end_cqe {
|
||||
u8 type;
|
||||
u8 tpa_agg_index;
|
||||
|
@ -259,7 +277,7 @@ struct eth_fast_path_rx_tpa_end_cqe {
|
|||
struct eth_pmd_flow_flags pmd_flags;
|
||||
};
|
||||
|
||||
/* TPA-start ETH Rx FP CQE. */
|
||||
/* TPA-start ETH Rx FP CQE */
|
||||
struct eth_fast_path_rx_tpa_start_cqe {
|
||||
u8 type;
|
||||
u8 bitfields;
|
||||
|
@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe {
|
|||
u8 tpa_agg_index;
|
||||
u8 header_len;
|
||||
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
|
||||
struct eth_fast_path_cqe_fw_debug fw_debug;
|
||||
__le16 flow_id;
|
||||
u8 reserved;
|
||||
struct eth_pmd_flow_flags pmd_flags;
|
||||
};
|
||||
|
@ -295,7 +313,7 @@ struct eth_rx_bd {
|
|||
struct regpair addr;
|
||||
};
|
||||
|
||||
/* regular ETH Rx SP CQE */
|
||||
/* Regular ETH Rx SP CQE */
|
||||
struct eth_slow_path_rx_cqe {
|
||||
u8 type;
|
||||
u8 ramrod_cmd_id;
|
||||
|
@ -306,7 +324,7 @@ struct eth_slow_path_rx_cqe {
|
|||
struct eth_pmd_flow_flags pmd_flags;
|
||||
};
|
||||
|
||||
/* union for all ETH Rx CQE types */
|
||||
/* Union for all ETH Rx CQE types */
|
||||
union eth_rx_cqe {
|
||||
struct eth_fast_path_rx_reg_cqe fast_path_regular;
|
||||
struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
|
||||
|
@ -366,7 +384,7 @@ struct eth_tx_2nd_bd {
|
|||
struct eth_tx_data_2nd_bd data;
|
||||
};
|
||||
|
||||
/* The parsing information data for the third tx bd of a given packet. */
|
||||
/* The parsing information data for the third tx bd of a given packet */
|
||||
struct eth_tx_data_3rd_bd {
|
||||
__le16 lso_mss;
|
||||
__le16 bitfields;
|
||||
|
@ -389,7 +407,7 @@ struct eth_tx_3rd_bd {
|
|||
struct eth_tx_data_3rd_bd data;
|
||||
};
|
||||
|
||||
/* Complementary information for the regular tx bd of a given packet. */
|
||||
/* Complementary information for the regular tx bd of a given packet */
|
||||
struct eth_tx_data_bd {
|
||||
__le16 reserved0;
|
||||
__le16 bitfields;
|
||||
|
@ -448,4 +466,16 @@ struct eth_db_data {
|
|||
__le16 bd_prod;
|
||||
};
|
||||
|
||||
/* RSS hash type */
|
||||
enum rss_hash_type {
|
||||
RSS_HASH_TYPE_DEFAULT = 0,
|
||||
RSS_HASH_TYPE_IPV4 = 1,
|
||||
RSS_HASH_TYPE_TCP_IPV4 = 2,
|
||||
RSS_HASH_TYPE_IPV6 = 3,
|
||||
RSS_HASH_TYPE_TCP_IPV6 = 4,
|
||||
RSS_HASH_TYPE_UDP_IPV4 = 5,
|
||||
RSS_HASH_TYPE_UDP_IPV6 = 6,
|
||||
MAX_RSS_HASH_TYPE
|
||||
};
|
||||
|
||||
#endif /* __ETH_COMMON__ */
|
||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -29,9 +29,12 @@
|
|||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __IWARP_COMMON__
|
||||
#define __IWARP_COMMON__
|
||||
|
||||
#include <linux/qed/rdma_common.h>
|
||||
|
||||
/************************/
|
||||
/* IWARP FW CONSTANTS */
|
||||
/************************/
|
||||
|
|
|
@ -61,6 +61,35 @@ struct qed_txq_start_ret_params {
|
|||
void *p_handle;
|
||||
};
|
||||
|
||||
enum qed_filter_config_mode {
|
||||
QED_FILTER_CONFIG_MODE_DISABLE,
|
||||
QED_FILTER_CONFIG_MODE_5_TUPLE,
|
||||
QED_FILTER_CONFIG_MODE_L4_PORT,
|
||||
QED_FILTER_CONFIG_MODE_IP_DEST,
|
||||
};
|
||||
|
||||
struct qed_ntuple_filter_params {
|
||||
/* Physically mapped address containing header of buffer to be used
|
||||
* as filter.
|
||||
*/
|
||||
dma_addr_t addr;
|
||||
|
||||
/* Length of header in bytes */
|
||||
u16 length;
|
||||
|
||||
/* Relative queue-id to receive classified packet */
|
||||
#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
|
||||
u16 qid;
|
||||
|
||||
/* Identifier can either be according to vport-id or vfid */
|
||||
bool b_is_vf;
|
||||
u8 vport_id;
|
||||
u8 vf_id;
|
||||
|
||||
/* true iff this filter is to be added. Else to be removed */
|
||||
bool b_is_add;
|
||||
};
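/* Hypothetical caller-side sketch (not part of the patch): with this change
 * an aRFS/ntuple request is described by qed_ntuple_filter_params instead of
 * the previous long argument list. 'ops', 'cdev', 'cookie', 'mapping',
 * 'header_len', 'rxq_id' and 'rc' are illustrative names only.
 */
struct qed_ntuple_filter_params params = {};

params.addr = mapping;		/* DMA address of the buffered header */
params.length = header_len;	/* header length in bytes */
params.qid = rxq_id;		/* or QED_RFS_NTUPLE_QID_RSS */
params.b_is_vf = false;
params.vport_id = 0;
params.b_is_add = true;		/* false would remove the filter */

rc = ops->ntuple_filter_config(cdev, cookie, &params);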
|
||||
|
||||
struct qed_dev_eth_info {
|
||||
struct qed_dev_info common;
|
||||
|
||||
|
@ -316,13 +345,12 @@ struct qed_eth_ops {
|
|||
int (*tunn_config)(struct qed_dev *cdev,
|
||||
struct qed_tunn_params *params);
|
||||
|
||||
int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
|
||||
dma_addr_t mapping, u16 length,
|
||||
u16 vport_id, u16 rx_queue_id,
|
||||
bool add_filter);
|
||||
int (*ntuple_filter_config)(struct qed_dev *cdev,
|
||||
void *cookie,
|
||||
struct qed_ntuple_filter_params *params);
|
||||
|
||||
int (*configure_arfs_searcher)(struct qed_dev *cdev,
|
||||
bool en_searcher);
|
||||
enum qed_filter_config_mode mode);
|
||||
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
|
||||
};
|
||||
|
||||
|
|
|
@ -244,16 +244,11 @@ struct qed_fcoe_pf_params {
|
|||
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
|
||||
struct qed_iscsi_pf_params {
|
||||
u64 glbl_q_params_addr;
|
||||
u64 bdq_pbl_base_addr[2];
|
||||
u32 max_cwnd;
|
||||
u64 bdq_pbl_base_addr[3];
|
||||
u16 cq_num_entries;
|
||||
u16 cmdq_num_entries;
|
||||
u32 two_msl_timer;
|
||||
u16 dup_ack_threshold;
|
||||
u16 tx_sws_timer;
|
||||
u16 min_rto;
|
||||
u16 min_rto_rt;
|
||||
u16 max_rto;
|
||||
|
||||
/* The following parameters are used during HW-init
|
||||
* and these parameters need to be passed as arguments
|
||||
|
@ -264,8 +259,8 @@ struct qed_iscsi_pf_params {
|
|||
|
||||
/* The following parameters are used during protocol-init */
|
||||
u16 half_way_close_timeout;
|
||||
u16 bdq_xoff_threshold[2];
|
||||
u16 bdq_xon_threshold[2];
|
||||
u16 bdq_xoff_threshold[3];
|
||||
u16 bdq_xon_threshold[3];
|
||||
u16 cmdq_xoff_threshold;
|
||||
u16 cmdq_xon_threshold;
|
||||
u16 rq_buffer_size;
|
||||
|
@ -281,10 +276,11 @@ struct qed_iscsi_pf_params {
|
|||
u8 gl_cmd_pi;
|
||||
u8 debug_mode;
|
||||
u8 ll2_ooo_queue_id;
|
||||
u8 ooo_enable;
|
||||
|
||||
u8 is_target;
|
||||
u8 bdq_pbl_num_entries[2];
|
||||
u8 is_soc_en;
|
||||
u8 soc_num_of_blocks_log;
|
||||
u8 bdq_pbl_num_entries[3];
|
||||
};
|
||||
|
||||
struct qed_rdma_pf_params {
|
||||
|
@ -316,7 +312,7 @@ enum qed_int_mode {
|
|||
};
|
||||
|
||||
struct qed_sb_info {
|
||||
struct status_block *sb_virt;
|
||||
struct status_block_e4 *sb_virt;
|
||||
dma_addr_t sb_phys;
|
||||
u32 sb_ack; /* Last given ack */
|
||||
u16 igu_sb_id;
|
||||
|
@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
|
|||
u16 rc = 0;
|
||||
|
||||
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
|
||||
STATUS_BLOCK_PROD_INDEX_MASK;
|
||||
STATUS_BLOCK_E4_PROD_INDEX_MASK;
|
||||
if (sb_info->sb_ack != prod) {
|
||||
sb_info->sb_ack = prod;
|
||||
rc |= QED_SB_IDX;
|
||||
|
|
|
@ -102,7 +102,6 @@ struct qed_iscsi_params_offload {
|
|||
u32 ss_thresh;
|
||||
u16 srtt;
|
||||
u16 rtt_var;
|
||||
u32 ts_time;
|
||||
u32 ts_recent;
|
||||
u32 ts_recent_age;
|
||||
u32 total_rt;
|
||||
|
@ -124,7 +123,6 @@ struct qed_iscsi_params_offload {
|
|||
u16 mss;
|
||||
u8 snd_wnd_scale;
|
||||
u8 rcv_wnd_scale;
|
||||
u32 ts_ticks_per_second;
|
||||
u16 da_timeout_value;
|
||||
u8 ack_frequency;
|
||||
};
|
||||
|
|
|
@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data {
|
|||
u32 opaque_data_1;
|
||||
|
||||
/* GSI only */
|
||||
u32 gid_dst[4];
|
||||
u32 src_qp;
|
||||
u16 qp_id;
|
||||
|
||||
union {
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
|
||||
#ifndef __RDMA_COMMON__
|
||||
#define __RDMA_COMMON__
|
||||
|
||||
/************************/
|
||||
/* RDMA FW CONSTANTS */
|
||||
/************************/
|
||||
|
|
|
@ -33,6 +33,10 @@
|
|||
#ifndef __ROCE_COMMON__
|
||||
#define __ROCE_COMMON__
|
||||
|
||||
/************************/
|
||||
/* ROCE FW CONSTANTS */
|
||||
/************************/
|
||||
|
||||
#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
|
||||
#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
|
||||
|
||||
|
@ -40,6 +44,7 @@
|
|||
#define ROCE_DCQCN_NP_MAX_QPS (64)
|
||||
#define ROCE_DCQCN_RP_MAX_QPS (64)
|
||||
|
||||
/* Affiliated asynchronous events / errors enumeration */
|
||||
enum roce_async_events_type {
|
||||
ROCE_ASYNC_EVENT_NONE = 0,
|
||||
ROCE_ASYNC_EVENT_COMM_EST = 1,
|
||||
|
|
|
@ -33,43 +33,77 @@
|
|||
#ifndef __STORAGE_COMMON__
|
||||
#define __STORAGE_COMMON__
|
||||
|
||||
#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
|
||||
/*********************/
|
||||
/* SCSI CONSTANTS */
|
||||
/*********************/
|
||||
|
||||
#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
|
||||
#define BDQ_NUM_RESOURCES (4)
|
||||
|
||||
#define BDQ_ID_RQ (0)
|
||||
#define BDQ_ID_IMM_DATA (1)
|
||||
#define BDQ_NUM_IDS (2)
|
||||
#define BDQ_ID_TQ (2)
|
||||
#define BDQ_NUM_IDS (3)
|
||||
|
||||
#define SCSI_NUM_SGES_SLOW_SGL_THR 8
|
||||
|
||||
#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
|
||||
#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)
|
||||
|
||||
struct scsi_bd {
|
||||
struct regpair address;
|
||||
struct regpair opaque;
|
||||
/* SCSI op codes */
|
||||
#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
|
||||
#define SCSI_OPCODE_READ_10 (0x28)
|
||||
#define SCSI_OPCODE_WRITE_6 (0x0A)
|
||||
#define SCSI_OPCODE_WRITE_10 (0x2A)
|
||||
#define SCSI_OPCODE_WRITE_12 (0xAA)
|
||||
#define SCSI_OPCODE_WRITE_16 (0x8A)
|
||||
#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E)
|
||||
#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE)
|
||||
#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E)
|
||||
|
||||
/* iSCSI Drv opaque */
|
||||
struct iscsi_drv_opaque {
|
||||
__le16 reserved_zero[3];
|
||||
__le16 opaque;
|
||||
};
|
||||
|
||||
/* Scsi 2B/8B opaque union */
|
||||
union scsi_opaque {
|
||||
struct regpair fcoe_opaque;
|
||||
struct iscsi_drv_opaque iscsi_opaque;
|
||||
};
|
||||
|
||||
/* SCSI buffer descriptor */
|
||||
struct scsi_bd {
|
||||
struct regpair address;
|
||||
union scsi_opaque opaque;
|
||||
};
|
||||
|
||||
/* Scsi Drv BDQ struct */
|
||||
struct scsi_bdq_ram_drv_data {
|
||||
__le16 external_producer;
|
||||
__le16 reserved0[3];
|
||||
};
|
||||
|
||||
/* SCSI SGE entry */
|
||||
struct scsi_sge {
|
||||
struct regpair sge_addr;
|
||||
__le32 sge_len;
|
||||
__le32 reserved;
|
||||
};
|
||||
|
||||
/* Cached SGEs section */
|
||||
struct scsi_cached_sges {
|
||||
struct scsi_sge sge[4];
|
||||
};
|
||||
|
||||
/* Scsi Drv CMDQ struct */
|
||||
struct scsi_drv_cmdq {
|
||||
__le16 cmdq_cons;
|
||||
__le16 reserved0;
|
||||
__le32 reserved1;
|
||||
};
|
||||
|
||||
/* Common SCSI init params passed by driver to FW in function init ramrod */
|
||||
struct scsi_init_func_params {
|
||||
__le16 num_tasks;
|
||||
u8 log_page_size;
|
||||
|
@ -77,6 +111,7 @@ struct scsi_init_func_params {
|
|||
u8 reserved2[12];
|
||||
};
|
||||
|
||||
/* SCSI RQ/CQ/CMDQ firmware function init parameters */
|
||||
struct scsi_init_func_queues {
|
||||
struct regpair glbl_q_params_addr;
|
||||
__le16 rq_buffer_size;
|
||||
|
@ -90,33 +125,39 @@ struct scsi_init_func_queues {
|
|||
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
|
||||
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
|
||||
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
|
||||
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
|
||||
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
|
||||
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
|
||||
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
|
||||
#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1
|
||||
#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4
|
||||
#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7
|
||||
#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5
|
||||
__le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
|
||||
u8 num_queues;
|
||||
u8 queue_relative_offset;
|
||||
u8 cq_sb_pi;
|
||||
u8 cmdq_sb_pi;
|
||||
__le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
|
||||
__le16 reserved0;
|
||||
u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
|
||||
u8 reserved1;
|
||||
struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
|
||||
__le16 bdq_xoff_threshold[BDQ_NUM_IDS];
|
||||
__le16 bdq_xon_threshold[BDQ_NUM_IDS];
|
||||
__le16 cmdq_xoff_threshold;
|
||||
__le16 bdq_xon_threshold[BDQ_NUM_IDS];
|
||||
__le16 cmdq_xon_threshold;
|
||||
__le32 reserved1;
|
||||
};
|
||||
|
||||
/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
|
||||
struct scsi_ram_per_bdq_resource_drv_data {
|
||||
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
|
||||
};
|
||||
|
||||
/* SCSI SGL types */
|
||||
enum scsi_sgl_mode {
|
||||
SCSI_TX_SLOW_SGL,
|
||||
SCSI_FAST_SGL,
|
||||
MAX_SCSI_SGL_MODE
|
||||
};
|
||||
|
||||
/* SCSI SGL parameters */
|
||||
struct scsi_sgl_params {
|
||||
struct regpair sgl_addr;
|
||||
__le32 sgl_total_length;
|
||||
|
@ -126,10 +167,16 @@ struct scsi_sgl_params {
|
|||
u8 reserved;
|
||||
};
|
||||
|
||||
/* SCSI terminate connection params */
|
||||
struct scsi_terminate_extra_params {
|
||||
__le16 unsolicited_cq_count;
|
||||
__le16 cmdq_count;
|
||||
u8 reserved[4];
|
||||
};
|
||||
|
||||
/* SCSI Task Queue Element */
|
||||
struct scsi_tqe {
|
||||
__le16 itid;
|
||||
};
|
||||
|
||||
#endif /* __STORAGE_COMMON__ */
|
||||
|
|
|
@ -33,8 +33,13 @@
|
|||
#ifndef __TCP_COMMON__
|
||||
#define __TCP_COMMON__
|
||||
|
||||
/********************/
|
||||
/* TCP FW CONSTANTS */
|
||||
/********************/
|
||||
|
||||
#define TCP_INVALID_TIMEOUT_VAL -1
|
||||
|
||||
/* OOO opaque data received from LL2 */
|
||||
struct ooo_opaque {
|
||||
__le32 cid;
|
||||
u8 drop_isle;
|
||||
|
@ -43,25 +48,29 @@ struct ooo_opaque {
|
|||
u8 ooo_isle;
|
||||
};
|
||||
|
||||
/* tcp connect mode enum */
|
||||
enum tcp_connect_mode {
|
||||
TCP_CONNECT_ACTIVE,
|
||||
TCP_CONNECT_PASSIVE,
|
||||
MAX_TCP_CONNECT_MODE
|
||||
};
|
||||
|
||||
/* tcp function init parameters */
|
||||
struct tcp_init_params {
|
||||
__le32 two_msl_timer;
|
||||
__le16 tx_sws_timer;
|
||||
u8 maxfinrt;
|
||||
u8 max_fin_rt;
|
||||
u8 reserved[9];
|
||||
};
|
||||
|
||||
/* tcp IPv4/IPv6 enum */
|
||||
enum tcp_ip_version {
|
||||
TCP_IPV4,
|
||||
TCP_IPV6,
|
||||
MAX_TCP_IP_VERSION
|
||||
};
|
||||
|
||||
/* tcp offload parameters */
|
||||
struct tcp_offload_params {
|
||||
__le16 local_mac_addr_lo;
|
||||
__le16 local_mac_addr_mid;
|
||||
|
@ -70,24 +79,29 @@ struct tcp_offload_params {
|
|||
__le16 remote_mac_addr_mid;
|
||||
__le16 remote_mac_addr_hi;
|
||||
__le16 vlan_id;
|
||||
u8 flags;
|
||||
__le16 flags;
|
||||
#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
|
||||
#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
|
||||
#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
|
||||
#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3
|
||||
#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4
|
||||
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
|
||||
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5
|
||||
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
|
||||
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
|
||||
#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
|
||||
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8
|
||||
#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F
|
||||
#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9
|
||||
u8 ip_version;
|
||||
u8 reserved0[3];
|
||||
__le32 remote_ip[4];
|
||||
__le32 local_ip[4];
|
||||
__le32 flow_label;
|
||||
|
@ -99,17 +113,21 @@ struct tcp_offload_params {
|
|||
u8 rcv_wnd_scale;
|
||||
u8 connect_mode;
|
||||
__le16 srtt;
|
||||
__le32 cwnd;
|
||||
__le32 ss_thresh;
|
||||
__le16 reserved1;
|
||||
__le32 rcv_wnd;
|
||||
__le32 cwnd;
|
||||
u8 ka_max_probe_cnt;
|
||||
u8 dup_ack_theshold;
|
||||
__le16 reserved1;
|
||||
__le32 ka_timeout;
|
||||
__le32 ka_interval;
|
||||
__le32 max_rt_time;
|
||||
__le32 initial_rcv_wnd;
|
||||
__le32 rcv_next;
|
||||
__le32 snd_una;
|
||||
__le32 snd_next;
|
||||
__le32 snd_max;
|
||||
__le32 snd_wnd;
|
||||
__le32 rcv_wnd;
|
||||
__le32 snd_wl1;
|
||||
__le32 ts_recent;
|
||||
__le32 ts_recent_age;
|
||||
|
@ -122,16 +140,13 @@ struct tcp_offload_params {
|
|||
u8 rt_cnt;
|
||||
__le16 rtt_var;
|
||||
__le16 fw_internal;
|
||||
__le32 ka_timeout;
|
||||
__le32 ka_interval;
|
||||
__le32 max_rt_time;
|
||||
__le32 initial_rcv_wnd;
|
||||
u8 snd_wnd_scale;
|
||||
u8 ack_frequency;
|
||||
__le16 da_timeout_value;
|
||||
__le32 reserved3[2];
|
||||
__le32 reserved3;
|
||||
};
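/* Hypothetical sketch (not part of the patch): populating the widened 16-bit
 * flags word of tcp_offload_params. SET_FIELD is assumed to be the bitfield
 * helper from the qed common HSI headers; the value is built in CPU order
 * and converted to little-endian once at the end. 'p' is an illustrative
 * pointer to a tcp_offload_params being prepared.
 */
u16 flags = 0;

SET_FIELD(flags, TCP_OFFLOAD_PARAMS_TS_EN, 1);
SET_FIELD(flags, TCP_OFFLOAD_PARAMS_ECN_SENDER_EN, 1);
p->flags = cpu_to_le16(flags);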
|
||||
|
||||
/* tcp offload parameters */
|
||||
struct tcp_offload_params_opt2 {
|
||||
__le16 local_mac_addr_lo;
|
||||
__le16 local_mac_addr_mid;
|
||||
|
@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 {
|
|||
__le16 remote_mac_addr_mid;
|
||||
__le16 remote_mac_addr_hi;
|
||||
__le16 vlan_id;
|
||||
u8 flags;
|
||||
__le16 flags;
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
|
||||
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4
|
||||
u8 ip_version;
|
||||
u8 reserved1[3];
|
||||
__le32 remote_ip[4];
|
||||
__le32 local_ip[4];
|
||||
__le32 flow_label;
|
||||
|
@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 {
|
|||
__le16 syn_ip_payload_length;
|
||||
__le32 syn_phy_addr_lo;
|
||||
__le32 syn_phy_addr_hi;
|
||||
__le32 reserved1[22];
|
||||
__le32 cwnd;
|
||||
u8 ka_max_probe_cnt;
|
||||
u8 reserved2[3];
|
||||
__le32 ka_timeout;
|
||||
__le32 ka_interval;
|
||||
__le32 max_rt_time;
|
||||
__le32 reserved3[16];
|
||||
};
|
||||
|
||||
/* tcp IPv4/IPv6 enum */
|
||||
enum tcp_seg_placement_event {
|
||||
TCP_EVENT_ADD_PEN,
|
||||
TCP_EVENT_ADD_NEW_ISLE,
|
||||
|
@ -177,6 +202,7 @@ enum tcp_seg_placement_event {
|
|||
MAX_TCP_SEG_PLACEMENT_EVENT
|
||||
};
|
||||
|
||||
/* tcp init parameters */
|
||||
struct tcp_update_params {
|
||||
__le16 flags;
|
||||
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
|
||||
|
@ -226,6 +252,7 @@ struct tcp_update_params {
|
|||
u8 reserved1[7];
|
||||
};
|
||||
|
||||
/* toe upload parameters */
|
||||
struct tcp_upload_params {
|
||||
__le32 rcv_next;
|
||||
__le32 snd_una;
|
||||
|
|