Merge branch 'qed-Utilize-FW-8.42.2.0'
Michal Kalderon says:

====================
qed*: Utilize FW 8.42.2.0

This FW contains several fixes and features; the main ones are listed below.
We have taken into consideration past comments on previous FW versions that
were uploaded and tried to separate this one into smaller patches to ease
review.

- RoCE
  - SRIOV support
  - Fixes in the following flows:
    - latency optimization flow for inline WQEs
    - iWARP OOO packed DDPs flow
    - tx-dif workaround calculations flow
    - XRC-SRQ exceed cache num

- iSCSI
  - Fixes:
    - iSCSI TCP out-of-order handling
    - iSCSI retransmit flow

- FCoE
  - Fixes:
    - upload + cleanup flows

- Debug
  - Better handling of extracting data during traffic
  - ILT Dump -> dumping host memory used by the chip
  - MDUMP -> collect debug data on system crash and extract it after reboot

Patches prefixed with FW 8.42.2.0 are required to work with the binary
8.42.2.0 FW, whereas the rest are FW related but do not require the binary.

Changes from V2
---------------
- Move FW version to the start of the series to maintain minimal compatibility
- Fix some kbuild errors:
  - frame size larger than 1024 (Queue Manager patch - remove redundant
    field from struct)
  - sparse warning on endianness (DMAE patch fix - wrong use of __le32 for
    a field used only on the host; should be u32)
  - static should be used for some functions (debug feature ILT and MDUMP)

Reported-by: kbuild test robot <lkp@intel.com>

Changes from V1
---------------
- Remove epoch + kernel version from device debug dump
- Don't bump driver version
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 8e5aa6173a
@@ -253,7 +253,8 @@ enum qed_resources {
 	QED_VLAN,
 	QED_RDMA_CNQ_RAM,
 	QED_ILT,
-	QED_LL2_QUEUE,
+	QED_LL2_RAM_QUEUE,
+	QED_LL2_CTX_QUEUE,
 	QED_CMDQS_CQS,
 	QED_RDMA_STATS_QUEUE,
 	QED_BDQ,
@@ -461,6 +462,8 @@ struct qed_fw_data {
 	const u8 *modes_tree_buf;
 	union init_op *init_ops;
 	const u32 *arr_data;
+	const u32 *fw_overlays;
+	u32 fw_overlays_len;
 	u32 init_ops_size;
 };
 
@@ -531,6 +534,23 @@ struct qed_nvm_image_info {
 	bool valid;
 };
 
+enum qed_hsi_def_type {
+	QED_HSI_DEF_MAX_NUM_VFS,
+	QED_HSI_DEF_MAX_NUM_L2_QUEUES,
+	QED_HSI_DEF_MAX_NUM_PORTS,
+	QED_HSI_DEF_MAX_SB_PER_PATH,
+	QED_HSI_DEF_MAX_NUM_PFS,
+	QED_HSI_DEF_MAX_NUM_VPORTS,
+	QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
+	QED_HSI_DEF_MAX_QM_TX_QUEUES,
+	QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
+	QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+	QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
+	QED_HSI_DEF_MAX_PBF_CMD_LINES,
+	QED_HSI_DEF_MAX_BTB_BLOCKS,
+	QED_NUM_HSI_DEFS
+};
+
 #define DRV_MODULE_VERSION \
 	__stringify(QED_MAJOR_VERSION) "." \
 	__stringify(QED_MINOR_VERSION) "." \
@@ -646,6 +666,7 @@ struct qed_hwfn {
 
 	struct dbg_tools_data dbg_info;
 	void *dbg_user_info;
+	struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
 
 	/* PWM region specific data */
 	u16 wid_count;
@@ -668,6 +689,7 @@ struct qed_hwfn {
 	/* Nvm images number and attributes */
 	struct qed_nvm_image_info nvm_info;
 
+	struct phys_mem_desc *fw_overlay_mem;
 	struct qed_ptt *p_arfs_ptt;
 
 	struct qed_simd_fp_handler simd_proto_handler[64];
@@ -796,8 +818,8 @@ struct qed_dev {
 	u8 cache_shift;
 
 	/* Init */
-	const struct iro *iro_arr;
-#define IRO (p_hwfn->cdev->iro_arr)
+	const u32 *iro_arr;
+#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
 
 	/* HW functions */
 	u8 num_hwfns;
@@ -856,6 +878,8 @@ struct qed_dev {
 	struct qed_cb_ll2_info *ll2;
 	u8 ll2_mac_address[ETH_ALEN];
 #endif
+	struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+	bool disable_ilt_dump;
 	DECLARE_HASHTABLE(connections, 10);
 	const struct firmware *firmware;
 
@@ -868,16 +892,35 @@ struct qed_dev {
 	bool iwarp_cmt;
 };
 
-#define NUM_OF_VFS(dev)		(QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
-						: MAX_NUM_VFS_K2)
-#define NUM_OF_L2_QUEUES(dev)	(QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
-						: MAX_NUM_L2_QUEUES_K2)
-#define NUM_OF_PORTS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
-						: MAX_NUM_PORTS_K2)
-#define NUM_OF_SBS(dev)		(QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
-						: MAX_SB_PER_PATH_K2)
-#define NUM_OF_ENG_PFS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
-						: MAX_NUM_PFS_K2)
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
+
+#define NUM_OF_VFS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
+#define NUM_OF_L2_QUEUES(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
+#define NUM_OF_PORTS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
+#define NUM_OF_SBS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
+#define NUM_OF_ENG_PFS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
+#define NUM_OF_VPORTS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
+#define NUM_OF_RSS_ENGINES(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
+#define NUM_OF_QM_TX_QUEUES(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
+#define NUM_OF_PXP_ILT_RECORDS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
+#define NUM_OF_QM_GLOBAL_RLS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
+#define NUM_OF_PBF_CMD_LINES(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
+#define NUM_OF_BTB_BLOCKS(dev) \
+	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
+
 
 /**
  * @brief qed_concrete_to_sw_fid - get the sw function id from
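For illustration only (not part of the patch): with the table-driven helper above, a caller picks a chip-specific limit through a single call instead of an open-coded BB/K2 ternary. A hypothetical allocation sized by the VF limit might look like:

	u32 max_vfs = NUM_OF_VFS(cdev);	/* qed_get_hsi_def_val(cdev, QED_HSI_DEF_MAX_NUM_VFS) */
	struct qed_vf_info *vf_info;	/* sketch only; not code from this series */

	vf_info = kcalloc(max_vfs, sizeof(*vf_info), GFP_KERNEL);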
@@ -50,12 +50,6 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-/* Max number of connection types in HW (DQ/CDU etc.) */
-#define MAX_CONN_TYPES		PROTOCOLID_COMMON
-#define NUM_TASK_TYPES		2
-#define NUM_TASK_PF_SEGMENTS	4
-#define NUM_TASK_VF_SEGMENTS	1
-
 /* QM constants */
 #define QM_PQ_ELEMENT_SIZE	4 /* in bytes */
 
@@ -123,126 +117,6 @@ struct src_ent {
 /* Alignment is inherent to the type1_task_context structure */
 #define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
 
-/* PF per protocl configuration object */
-#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
-#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
-
-struct qed_tid_seg {
-	u32 count;
-	u8 type;
-	bool has_fl_mem;
-};
-
-struct qed_conn_type_cfg {
-	u32 cid_count;
-	u32 cids_per_vf;
-	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
-};
-
-/* ILT Client configuration, Per connection type (protocol) resources. */
-#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
-#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
-#define CDUC_BLK		(0)
-#define SRQ_BLK			(0)
-#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
-#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
-
-enum ilt_clients {
-	ILT_CLI_CDUC,
-	ILT_CLI_CDUT,
-	ILT_CLI_QM,
-	ILT_CLI_TM,
-	ILT_CLI_SRC,
-	ILT_CLI_TSDM,
-	ILT_CLI_MAX
-};
-
-struct ilt_cfg_pair {
-	u32 reg;
-	u32 val;
-};
-
-struct qed_ilt_cli_blk {
-	u32 total_size;		/* 0 means not active */
-	u32 real_size_in_page;
-	u32 start_line;
-	u32 dynamic_line_cnt;
-};
-
-struct qed_ilt_client_cfg {
-	bool active;
-
-	/* ILT boundaries */
-	struct ilt_cfg_pair first;
-	struct ilt_cfg_pair last;
-	struct ilt_cfg_pair p_size;
-
-	/* ILT client blocks for PF */
-	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
-	u32 pf_total_lines;
-
-	/* ILT client blocks for VFs */
-	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
-	u32 vf_total_lines;
-};
-
-/* Per Path -
- *      ILT shadow table
- *      Protocol acquired CID lists
- *      PF start line in ILT
- */
-struct qed_dma_mem {
-	dma_addr_t p_phys;
-	void *p_virt;
-	size_t size;
-};
-
-struct qed_cid_acquired_map {
-	u32 start_cid;
-	u32 max_count;
-	unsigned long *cid_map;
-};
-
-struct qed_cxt_mngr {
-	/* Per protocl configuration */
-	struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
-
-	/* computed ILT structure */
-	struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
-
-	/* Task type sizes */
-	u32 task_type_size[NUM_TASK_TYPES];
-
-	/* total number of VFs for this hwfn -
-	 * ALL VFs are symmetric in terms of HW resources
-	 */
-	u32 vf_count;
-
-	/* Acquired CIDs */
-	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
-
-	struct qed_cid_acquired_map
-	acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
-
-	/* ILT shadow table */
-	struct qed_dma_mem *ilt_shadow;
-	u32 pf_start_line;
-
-	/* Mutex for a dynamic ILT allocation */
-	struct mutex mutex;
-
-	/* SRC T2 */
-	struct qed_dma_mem *t2;
-	u32 t2_num_pages;
-	u64 first_free;
-	u64 last_free;
-
-	/* total number of SRQ's for this hwfn */
-	u32 srq_count;
-
-	/* Maximal number of L2 steering filters */
-	u32 arfs_count;
-};
-
 static bool src_proto(enum protocol_type type)
 {
 	return type == PROTOCOLID_ISCSI ||
@@ -880,30 +754,60 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
 
 static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
 {
-	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
 	u32 i;
 
-	if (!p_mngr->t2)
+	if (!p_t2 || !p_t2->dma_mem)
 		return;
 
-	for (i = 0; i < p_mngr->t2_num_pages; i++)
-		if (p_mngr->t2[i].p_virt)
+	for (i = 0; i < p_t2->num_pages; i++)
+		if (p_t2->dma_mem[i].virt_addr)
 			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
-					  p_mngr->t2[i].size,
-					  p_mngr->t2[i].p_virt,
-					  p_mngr->t2[i].p_phys);
+					  p_t2->dma_mem[i].size,
+					  p_t2->dma_mem[i].virt_addr,
+					  p_t2->dma_mem[i].phys_addr);
 
-	kfree(p_mngr->t2);
-	p_mngr->t2 = NULL;
+	kfree(p_t2->dma_mem);
+	p_t2->dma_mem = NULL;
 }
 
+static int
+qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
+		       struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
+{
+	void **p_virt;
+	u32 size, i;
+
+	if (!p_t2 || !p_t2->dma_mem)
+		return -EINVAL;
+
+	for (i = 0; i < p_t2->num_pages; i++) {
+		size = min_t(u32, total_size, page_size);
+		p_virt = &p_t2->dma_mem[i].virt_addr;
+
+		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+					     size,
+					     &p_t2->dma_mem[i].phys_addr,
+					     GFP_KERNEL);
+		if (!p_t2->dma_mem[i].virt_addr)
+			return -ENOMEM;
+
+		memset(*p_virt, 0, size);
+		p_t2->dma_mem[i].size = size;
+		total_size -= size;
+	}
+
+	return 0;
+}
+
 static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 	u32 conn_num, total_size, ent_per_page, psz, i;
+	struct phys_mem_desc *p_t2_last_page;
 	struct qed_ilt_client_cfg *p_src;
 	struct qed_src_iids src_iids;
-	struct qed_dma_mem *p_t2;
+	struct qed_src_t2 *p_t2;
 	int rc;
 
 	memset(&src_iids, 0, sizeof(src_iids));
@@ -921,49 +825,39 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 
 	/* use the same page size as the SRC ILT client */
 	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
-	p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+	p_t2 = &p_mngr->src_t2;
+	p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
 
 	/* allocate t2 */
-	p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
-			     GFP_KERNEL);
-	if (!p_mngr->t2) {
+	p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
+				GFP_KERNEL);
+	if (!p_t2->dma_mem) {
 		DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
 		rc = -ENOMEM;
 		goto t2_fail;
 	}
 
-	/* allocate t2 pages */
-	for (i = 0; i < p_mngr->t2_num_pages; i++) {
-		u32 size = min_t(u32, total_size, psz);
-		void **p_virt = &p_mngr->t2[i].p_virt;
-
-		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
-					     &p_mngr->t2[i].p_phys,
-					     GFP_KERNEL);
-		if (!p_mngr->t2[i].p_virt) {
-			rc = -ENOMEM;
-			goto t2_fail;
-		}
-		p_mngr->t2[i].size = size;
-		total_size -= size;
-	}
+	rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
+	if (rc)
+		goto t2_fail;
 
 	/* Set the t2 pointers */
 
 	/* entries per page - must be a power of two */
 	ent_per_page = psz / sizeof(struct src_ent);
 
-	p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+	p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
 
-	p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
-	p_mngr->last_free = (u64) p_t2->p_phys +
+	p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
+	p_t2->last_free = (u64)p_t2_last_page->phys_addr +
 	    ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
 
-	for (i = 0; i < p_mngr->t2_num_pages; i++) {
+	for (i = 0; i < p_t2->num_pages; i++) {
 		u32 ent_num = min_t(u32,
 				    ent_per_page,
 				    conn_num);
-		struct src_ent *entries = p_mngr->t2[i].p_virt;
-		u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+		struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
+		u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
 		u32 j;
 
 		for (j = 0; j < ent_num - 1; j++) {
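The first_free/last_free computation above is plain page arithmetic. As a worked example with illustrative numbers (assuming a 4 KB SRC page and this driver's 64-byte struct src_ent): ent_per_page = 4096 / 64 = 64, so for conn_num = 1000 the last entry falls in page (1000 - 1) / 64 = 15, at byte offset ((1000 - 1) & 63) * 64 = 39 * 64 = 2496 into that page.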
@@ -971,8 +865,8 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 			entries[j].next = cpu_to_be64(val);
 		}
 
-		if (i < p_mngr->t2_num_pages - 1)
-			val = (u64) p_mngr->t2[i + 1].p_phys;
+		if (i < p_t2->num_pages - 1)
+			val = (u64)p_t2->dma_mem[i + 1].phys_addr;
 		else
 			val = 0;
 		entries[j].next = cpu_to_be64(val);
@@ -988,7 +882,7 @@ t2_fail:
 }
 
 #define for_each_ilt_valid_client(pos, clients)	\
-	for (pos = 0; pos < ILT_CLI_MAX; pos++)		\
+	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)	\
 		if (!clients[pos].active) {		\
 			continue;			\
 		} else					\
@@ -1014,13 +908,13 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
 	ilt_size = qed_cxt_ilt_shadow_size(p_cli);
 
 	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
-		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+		struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
 
-		if (p_dma->p_virt)
+		if (p_dma->virt_addr)
 			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
-					  p_dma->size, p_dma->p_virt,
-					  p_dma->p_phys);
-		p_dma->p_virt = NULL;
+					  p_dma->size, p_dma->virt_addr,
+					  p_dma->phys_addr);
+		p_dma->virt_addr = NULL;
 	}
 	kfree(p_mngr->ilt_shadow);
 }
@@ -1030,7 +924,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 			     enum ilt_clients ilt_client,
 			     u32 start_line_offset)
 {
-	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+	struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
 	u32 lines, line, sz_left, lines_to_skip = 0;
 
 	/* Special handling for RoCE that supports dynamic allocation */
@@ -1059,8 +953,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 		if (!p_virt)
 			return -ENOMEM;
 
-		ilt_shadow[line].p_phys = p_phys;
-		ilt_shadow[line].p_virt = p_virt;
+		ilt_shadow[line].phys_addr = p_phys;
+		ilt_shadow[line].virt_addr = p_virt;
 		ilt_shadow[line].size = size;
 
 		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -1083,7 +977,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 	int rc;
 
 	size = qed_cxt_ilt_shadow_size(clients);
-	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
 				     GFP_KERNEL);
 	if (!p_mngr->ilt_shadow) {
 		rc = -ENOMEM;
@@ -1092,7 +986,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 
 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 		   "Allocated 0x%x bytes for ilt shadow\n",
-		   (u32)(size * sizeof(struct qed_dma_mem)));
+		   (u32)(size * sizeof(struct phys_mem_desc)));
 
 	for_each_ilt_valid_client(i, clients) {
 		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
@@ -1238,15 +1132,20 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
 	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
 	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
 	/* default ILT page size for all clients is 64K */
-	for (i = 0; i < ILT_CLI_MAX; i++)
+	for (i = 0; i < MAX_ILT_CLIENTS; i++)
 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
+	p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
+
 	/* Initialize task sizes */
 	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
 	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
 
-	if (p_hwfn->cdev->p_iov_info)
+	if (p_hwfn->cdev->p_iov_info) {
 		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+		p_mngr->first_vf_in_pf =
+			p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+	}
 	/* Initialize the dynamic ILT allocation mutex */
 	mutex_init(&p_mngr->mutex);
 
@@ -1522,7 +1421,6 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
 	params.num_vports = qm_info->num_vports;
 	params.pf_wfq = qm_info->pf_wfq;
 	params.pf_rl = qm_info->pf_rl;
-	params.link_speed = p_link->speed;
 	params.pq_params = qm_info->qm_pq_params;
 	params.vport_params = qm_info->qm_vport_params;
 
@@ -1674,7 +1572,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
 {
 	struct qed_ilt_client_cfg *clients;
 	struct qed_cxt_mngr *p_mngr;
-	struct qed_dma_mem *p_shdw;
+	struct phys_mem_desc *p_shdw;
 	u32 line, rt_offst, i;
 
 	qed_ilt_bounds_init(p_hwfn);
@@ -1699,15 +1597,15 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
 			/** p_virt could be NULL incase of dynamic
 			 * allocation
 			 */
-			if (p_shdw[line].p_virt) {
+			if (p_shdw[line].virt_addr) {
 				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
 				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
-					  (p_shdw[line].p_phys >> 12));
+					  (p_shdw[line].phys_addr >> 12));
 
 				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
 					   rt_offst, line, i,
-					   (u64)(p_shdw[line].p_phys >> 12));
+					   (u64)(p_shdw[line].phys_addr >> 12));
 			}
 
 			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -2050,10 +1948,10 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
 	line = p_info->iid / cxts_per_p;
 
 	/* Make sure context is allocated (dynamic allocation) */
-	if (!p_mngr->ilt_shadow[line].p_virt)
+	if (!p_mngr->ilt_shadow[line].virt_addr)
 		return -EINVAL;
 
-	p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+	p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
 	    p_info->iid % cxts_per_p * conn_cxt_size;
 
 	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
@@ -2234,7 +2132,7 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
 	for (i = 0; i < total_lines; i++) {
 		shadow_line = i + p_fl_seg->start_line -
 		    p_hwfn->p_cxt_mngr->pf_start_line;
-		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
 	}
 	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
 	    p_fl_seg->real_size_in_page;
@@ -2296,7 +2194,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 
 	mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
 
-	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
 		goto out0;
 
 	p_ptt = qed_ptt_acquire(p_hwfn);
@@ -2334,8 +2232,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		}
 	}
 
-	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
-	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
+	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
 	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
 	    p_blk->real_size_in_page;
 
@@ -2345,9 +2243,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 
 	ilt_hw_entry = 0;
 	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
-	SET_FIELD(ilt_hw_entry,
-		  ILT_ENTRY_PHY_ADDR,
-		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
+		  >> 12));
 
 	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
 	qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
@@ -2434,16 +2332,16 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
 	}
 
 	for (i = shadow_start_line; i < shadow_end_line; i++) {
-		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
 			continue;
 
 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 				  p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
-				  p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
-				  p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+				  p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
+				  p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
 
-		p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
-		p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+		p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
+		p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
 		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
 
 		/* compute absolute offset */
@@ -2547,8 +2445,76 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
 
 	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
 		  p_mngr->pf_start_line;
-	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
 		       (tid % num_tids_per_block) * tid_size;
 
 	return 0;
 }
+
+static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
+{
+	if (p_blk->real_size_in_page == 0)
+		return 0;
+
+	return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+}
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u16 i, pages = 0;
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+		p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+		pages += qed_blk_calculate_pages(p_blk);
+	}
+
+	return pages;
+}
+
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u16 i, pages = 0;
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+		p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
+		pages += qed_blk_calculate_pages(p_blk);
+	}
+
+	return pages;
+}
+
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u16 i, pages = 0;
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+		pages += qed_blk_calculate_pages(p_blk);
+	}
+
+	return pages;
+}
+
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u16 pages = 0, i;
+
+	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+		p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
+		pages += qed_blk_calculate_pages(p_blk);
+	}
+
+	return pages;
+}
@@ -242,4 +242,134 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
 #define QED_CTX_FL_MEM 1
 int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
 			 u32 tid, u8 ctx_type, void **task_ctx);
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES		PROTOCOLID_COMMON
+#define NUM_TASK_TYPES		2
+#define NUM_TASK_PF_SEGMENTS	4
+#define NUM_TASK_VF_SEGMENTS	1
+
+/* PF per protocl configuration object */
+#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+	u32 count;
+	u8 type;
+	bool has_fl_mem;
+};
+
+struct qed_conn_type_cfg {
+	u32 cid_count;
+	u32 cids_per_vf;
+	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration,
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.)
+ * 1 - for connection context (CDUC) and for each task context we need two
+ * values, for regular task context and for force load memory
+ */
+#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK		(0)
+#define SRQ_BLK			(0)
+#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
+
+struct ilt_cfg_pair {
+	u32 reg;
+	u32 val;
+};
+
+struct qed_ilt_cli_blk {
+	u32 total_size;		/* 0 means not active */
+	u32 real_size_in_page;
+	u32 start_line;
+	u32 dynamic_line_offset;
+	u32 dynamic_line_cnt;
+};
+
+struct qed_ilt_client_cfg {
+	bool active;
+
+	/* ILT boundaries */
+	struct ilt_cfg_pair first;
+	struct ilt_cfg_pair last;
+	struct ilt_cfg_pair p_size;
+
+	/* ILT client blocks for PF */
+	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+	u32 pf_total_lines;
+
+	/* ILT client blocks for VFs */
+	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+	u32 vf_total_lines;
+};
+
+struct qed_cid_acquired_map {
+	u32 start_cid;
+	u32 max_count;
+	unsigned long *cid_map;
+};
+
+struct qed_src_t2 {
+	struct phys_mem_desc *dma_mem;
+	u32 num_pages;
+	u64 first_free;
+	u64 last_free;
+};
+
+struct qed_cxt_mngr {
+	/* Per protocl configuration */
+	struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+	/* computed ILT structure */
+	struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
+
+	/* Task type sizes */
+	u32 task_type_size[NUM_TASK_TYPES];
+
+	/* total number of VFs for this hwfn -
+	 * ALL VFs are symmetric in terms of HW resources
+	 */
+	u32 vf_count;
+	u32 first_vf_in_pf;
+
+	/* Acquired CIDs */
+	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+	struct qed_cid_acquired_map
+	acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
+	/* ILT shadow table */
+	struct phys_mem_desc *ilt_shadow;
+	u32 ilt_shadow_size;
+	u32 pf_start_line;
+
+	/* Mutex for a dynamic ILT allocation */
+	struct mutex mutex;
+
+	/* SRC T2 */
+	struct qed_src_t2 src_t2;
+	u32 t2_num_pages;
+	u64 first_free;
+	u64 last_free;
+
+	/* total number of SRQ's for this hwfn */
+	u32 srq_count;
+
+	/* Maximal number of L2 steering filters */
+	u32 arfs_count;
+
+	u8 task_type_id;
+	u16 task_ctx_size;
+	u16 conn_ctx_size;
+};
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
+
 #endif
(File diff suppressed because it is too large)
@@ -14,11 +14,13 @@ enum qed_dbg_features {
 	DBG_FEATURE_IGU_FIFO,
 	DBG_FEATURE_PROTECTION_OVERRIDE,
 	DBG_FEATURE_FW_ASSERTS,
+	DBG_FEATURE_ILT,
 	DBG_FEATURE_NUM
 };
 
 /* Forward Declaration */
 struct qed_dev;
+struct qed_hwfn;
 
 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
 int qed_dbg_grc_size(struct qed_dev *cdev);
@@ -37,6 +39,8 @@ int qed_dbg_protection_override_size(struct qed_dev *cdev);
 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
 		       u32 *num_dumped_bytes);
 int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_ilt_size(struct qed_dev *cdev);
 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
 		      u32 *num_dumped_bytes);
 int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
@@ -907,7 +907,7 @@ qed_llh_access_filter(struct qed_hwfn *p_hwfn,
 	/* Filter value */
 	addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
 
-	params.flags = QED_DMAE_FLAG_PF_DST;
+	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
 	params.dst_pfid = pfid;
 	rc = qed_dmae_host2grc(p_hwfn,
 			       p_ptt,
@@ -1412,6 +1412,7 @@ void qed_resc_free(struct qed_dev *cdev)
 		qed_dmae_info_free(p_hwfn);
 		qed_dcbx_info_free(p_hwfn);
 		qed_dbg_user_data_free(p_hwfn);
+		qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
 
 		/* Destroy doorbell recovery mechanism */
 		qed_db_recovery_teardown(p_hwfn);
@@ -1571,7 +1572,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
 
 	/* all vports participate in weighted fair queueing */
 	for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
-		qm_info->qm_vport_params[i].vport_wfq = 1;
+		qm_info->qm_vport_params[i].wfq = 1;
 }
 
 /* initialize qm port params */
@@ -1579,6 +1580,7 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
 {
 	/* Initialize qm port parameters */
 	u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
+	struct qed_dev *cdev = p_hwfn->cdev;
 
 	/* indicate how ooo and high pri traffic is dealt with */
 	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -1588,11 +1590,13 @@ static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
 	for (i = 0; i < num_ports; i++) {
 		struct init_qm_port_params *p_qm_port =
 		    &p_hwfn->qm_info.qm_port_params[i];
+		u16 pbf_max_cmd_lines;
 
 		p_qm_port->active = 1;
 		p_qm_port->active_phys_tcs = active_phys_tcs;
-		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
-		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+		pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
+		p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
+		p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
 	}
 }
 
@@ -2034,9 +2038,8 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
 		vport = &(qm_info->qm_vport_params[i]);
 		DP_VERBOSE(p_hwfn,
 			   NETIF_MSG_HW,
-			   "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
-			   qm_info->start_vport + i,
-			   vport->vport_rl, vport->vport_wfq);
+			   "vport idx %d, wfq %d, first_tx_pq_id [ ",
+			   qm_info->start_vport + i, vport->wfq);
 		for (tc = 0; tc < NUM_OF_TCS; tc++)
 			DP_VERBOSE(p_hwfn,
 				   NETIF_MSG_HW,
@@ -2049,11 +2052,11 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
 		pq = &(qm_info->qm_pq_params[i]);
 		DP_VERBOSE(p_hwfn,
 			   NETIF_MSG_HW,
-			   "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+			   "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
 			   qm_info->start_pq + i,
 			   pq->port_id,
 			   pq->vport_id,
-			   pq->tc_id, pq->wrr_group, pq->rl_valid);
+			   pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
 	}
 }
 
@@ -2103,9 +2106,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	if (!b_rc)
 		return -EINVAL;
 
-	/* clear the QM_PF runtime phase leftovers from previous init */
-	qed_init_clear_rt_data(p_hwfn);
-
 	/* prepare QM portion of runtime array */
 	qed_qm_init_pf(p_hwfn, p_ptt, false);
 
@@ -2346,7 +2346,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
 		if (rc)
 			goto alloc_err;
 
-		rc = qed_dbg_alloc_user_data(p_hwfn);
+		rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
 		if (rc)
 			goto alloc_err;
 	}
@@ -2623,7 +2623,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
 	params.pf_rl_en = qm_info->pf_rl_en;
 	params.pf_wfq_en = qm_info->pf_wfq_en;
-	params.vport_rl_en = qm_info->vport_rl_en;
+	params.global_rl_en = qm_info->vport_rl_en;
 	params.vport_wfq_en = qm_info->vport_wfq_en;
 	params.port_params = qm_info->qm_port_params;
 
@@ -2891,6 +2891,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
 
+	qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
+
 	/* Pure runtime initializations - directly to the HW */
 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
 
@@ -3000,8 +3002,10 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 	u32 load_code, resp, param, drv_mb_param;
 	bool b_default_mtu = true;
 	struct qed_hwfn *p_hwfn;
-	int rc = 0, i;
+	const u32 *fw_overlays;
+	u32 fw_overlays_len;
 	u16 ether_type;
+	int rc = 0, i;
 
 	if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
 		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
@@ -3102,6 +3106,17 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 		 */
 		qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
 
+		fw_overlays = cdev->fw_data->fw_overlays;
+		fw_overlays_len = cdev->fw_data->fw_overlays_len;
+		p_hwfn->fw_overlay_mem =
+		    qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
+					     fw_overlays_len);
+		if (!p_hwfn->fw_overlay_mem) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to allocate fw overlay memory\n");
+			goto load_err;
+		}
+
 		switch (load_code) {
 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -3566,8 +3581,10 @@ const char *qed_hw_get_resc_name(enum qed_resources res_id)
 		return "RDMA_CNQ_RAM";
 	case QED_ILT:
 		return "ILT";
-	case QED_LL2_QUEUE:
-		return "LL2_QUEUE";
+	case QED_LL2_RAM_QUEUE:
+		return "LL2_RAM_QUEUE";
+	case QED_LL2_CTX_QUEUE:
+		return "LL2_CTX_QUEUE";
 	case QED_CMDQS_CQS:
 		return "CMDQS_CQS";
 	case QED_RDMA_STATS_QUEUE:
@@ -3606,18 +3623,46 @@ __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = {
+	{MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
+	{MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
+	{MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
+	{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2,},
+	{MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
+	{MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
+	{ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
+	{MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
+	{PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
+	{RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
+	{MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
+	{PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
+	{BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+};
+
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
+{
+	enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
+
+	if (type >= QED_NUM_HSI_DEFS) {
+		DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
+		return 0;
+	}
+
+	return qed_hsi_def_val[type][chip_id];
+}
+
 static int
 qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	bool b_ah = QED_IS_AH(p_hwfn->cdev);
 	u32 resc_max_val, mcp_resp;
 	u8 res_id;
 	int rc;
 
 	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
 		switch (res_id) {
-		case QED_LL2_QUEUE:
-			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+		case QED_LL2_RAM_QUEUE:
+			resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES;
 			break;
+		case QED_LL2_CTX_QUEUE:
+			resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES;
+			break;
 		case QED_RDMA_CNQ_RAM:
 			/* No need for a case for QED_CMDQS_CQS since
@@ -3626,8 +3671,8 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 			resc_max_val = NUM_OF_GLOBAL_QUEUES;
 			break;
 		case QED_RDMA_STATS_QUEUE:
-			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
-			    : RDMA_NUM_STATISTIC_COUNTERS_BB;
+			resc_max_val =
+			    NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev);
 			break;
 		case QED_BDQ:
 			resc_max_val = BDQ_NUM_RESOURCES;
@@ -3660,28 +3705,24 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
 			 u32 *p_resc_num, u32 *p_resc_start)
 {
 	u8 num_funcs = p_hwfn->num_funcs_on_engine;
-	bool b_ah = QED_IS_AH(p_hwfn->cdev);
+	struct qed_dev *cdev = p_hwfn->cdev;
 
 	switch (res_id) {
 	case QED_L2_QUEUE:
-		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
-			       MAX_NUM_L2_QUEUES_BB) / num_funcs;
+		*p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs;
 		break;
 	case QED_VPORT:
-		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
-			       MAX_NUM_VPORTS_BB) / num_funcs;
+		*p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs;
 		break;
 	case QED_RSS_ENG:
-		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
-			       ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+		*p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs;
 		break;
 	case QED_PQ:
-		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
-			       MAX_QM_TX_QUEUES_BB) / num_funcs;
+		*p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs;
 		*p_resc_num &= ~0x7;	/* The granularity of the PQs is 8 */
 		break;
 	case QED_RL:
-		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+		*p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs;
 		break;
 	case QED_MAC:
 	case QED_VLAN:
@@ -3689,11 +3730,13 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
 		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
 		break;
 	case QED_ILT:
-		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
-			       PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+		*p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs;
 		break;
-	case QED_LL2_QUEUE:
-		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+	case QED_LL2_RAM_QUEUE:
+		*p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
 		break;
+	case QED_LL2_CTX_QUEUE:
+		*p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
+		break;
 	case QED_RDMA_CNQ_RAM:
 	case QED_CMDQS_CQS:
@@ -3701,8 +3744,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
 		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
 		break;
 	case QED_RDMA_STATS_QUEUE:
-		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
-			       RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
+		*p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs;
 		break;
 	case QED_BDQ:
 		if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
@@ -5087,11 +5129,11 @@ static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
 
-		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+		vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
 		    min_pf_rate;
 		qed_init_vport_wfq(p_hwfn, p_ptt,
 				   vport_params[i].first_tx_pq_id,
-				   vport_params[i].vport_wfq);
+				   vport_params[i].wfq);
 	}
 }
 
@@ -5102,7 +5144,7 @@ static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
 	int i;
 
 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
-		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+		p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
 }
 
 static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
@@ -5118,7 +5160,7 @@ static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
 		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
 		qed_init_vport_wfq(p_hwfn, p_ptt,
 				   vport_params[i].first_tx_pq_id,
-				   vport_params[i].vport_wfq);
+				   vport_params[i].wfq);
 	}
 }
 
@@ -230,30 +230,6 @@ enum qed_dmae_address_type_t {
 	QED_DMAE_ADDRESS_GRC
 };
 
-/* value of flags If QED_DMAE_FLAG_RW_REPL_SRC flag is set and the
- * source is a block of length DMAE_MAX_RW_SIZE and the
- * destination is larger, the source block will be duplicated as
- * many times as required to fill the destination block. This is
- * used mostly to write a zeroed buffer to destination address
- * using DMA
- */
-#define QED_DMAE_FLAG_RW_REPL_SRC	0x00000001
-#define QED_DMAE_FLAG_VF_SRC		0x00000002
-#define QED_DMAE_FLAG_VF_DST		0x00000004
-#define QED_DMAE_FLAG_COMPLETION_DST	0x00000008
-#define QED_DMAE_FLAG_PORT		0x00000010
-#define QED_DMAE_FLAG_PF_SRC		0x00000020
-#define QED_DMAE_FLAG_PF_DST		0x00000040
-
-struct qed_dmae_params {
-	u32 flags;		/* consists of QED_DMAE_FLAG_* values */
-	u8 src_vfid;
-	u8 dst_vfid;
-	u8 port_id;
-	u8 src_pfid;
-	u8 dst_pfid;
-};
-
 /**
  * @brief qed_dmae_host2grc - copy data from source addr to
  *        dmae registers using the given ptt
@@ -167,6 +167,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 		goto err;
 	}
 	p_cxt = cxt_info.p_cxt;
+	memset(p_cxt, 0, sizeof(*p_cxt));
+
 	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
 		  E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
 
(File diff suppressed because it is too large)
@@ -393,7 +393,7 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
 
 /* DMAE */
 #define QED_DMAE_FLAGS_IS_SET(params, flag) \
-	((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag))
+	((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))
 
 static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
 			    const u8 is_src_type_grc,
@@ -408,62 +408,55 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
 	 * 0- The source is the PCIe
 	 * 1- The source is the GRC.
 	 */
-	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
-				   : DMAE_CMD_SRC_MASK_PCIE) <<
-		  DMAE_CMD_SRC_SHIFT;
-	src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
-		   p_params->src_pfid : p_hwfn->rel_pf_id;
-	opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
-		   DMAE_CMD_SRC_PF_ID_SHIFT);
+	SET_FIELD(opcode, DMAE_CMD_SRC,
+		  (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
+	src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
+	    p_params->src_pfid : p_hwfn->rel_pf_id;
+	SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);
 
 	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
-	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
-				   : DMAE_CMD_DST_MASK_PCIE) <<
-		  DMAE_CMD_DST_SHIFT;
-	dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
-		   p_params->dst_pfid : p_hwfn->rel_pf_id;
-	opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
-		   DMAE_CMD_DST_PF_ID_SHIFT);
+	SET_FIELD(opcode, DMAE_CMD_DST,
+		  (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
+	dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
+	    p_params->dst_pfid : p_hwfn->rel_pf_id;
+	SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);
 
 	/* Whether to write a completion word to the completion destination:
 	 * 0-Do not write a completion word
 	 * 1-Write the completion word
 	 */
-	opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
-	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
-		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+	SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
+	SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
 
 	if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
-		opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+		SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);
 
-	opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+	/* swapping mode 3 - big endian */
+	SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);
 
-	port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
-		  p_params->port_id : p_hwfn->port_id;
-	opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT);
+	port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
+	    p_params->port_id : p_hwfn->port_id;
+	SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
 
 	/* reset source address in next go */
-	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
-		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+	SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
 
 	/* reset dest address in next go */
-	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
-		   DMAE_CMD_DST_ADDR_RESET_SHIFT);
+	SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);
 
 	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
-	if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
-		opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
-		opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+	if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
+		SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
+		SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
 	} else {
-		opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
-			    DMAE_CMD_SRC_VF_ID_SHIFT;
+		SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
 	}
 
-	if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
-		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
-		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+	if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
+		SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
+		SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
 	} else {
-		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+		SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
 	}
 
 	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
@@ -44,9 +44,9 @@
 #define CDU_VALIDATION_DEFAULT_CFG	61
 
 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
-	{400, 336, 352, 304, 304, 384, 416, 352},	/* region 3 offsets */
-	{528, 496, 416, 448, 448, 512, 544, 480},	/* region 4 offsets */
-	{608, 544, 496, 512, 576, 592, 624, 560}	/* region 5 offsets */
+	{400, 336, 352, 368, 304, 384, 416, 352},	/* region 3 offsets */
+	{528, 496, 416, 512, 448, 512, 544, 480},	/* region 4 offsets */
+	{608, 544, 496, 576, 576, 592, 624, 560}	/* region 5 offsets */
 };
 
 static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
@@ -61,6 +61,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
 	0x100) - 1 : 0)
 #define QM_INVALID_PQ_ID		0xffff
 
+/* Max link speed (in Mbps) */
+#define QM_MAX_LINK_SPEED		100000
+
 /* Feature enable */
 #define QM_BYPASS_EN			1
 #define QM_BYTE_CRD_EN			1
@@ -128,8 +131,6 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
 /* Pure LB CmdQ lines (+spare) */
 #define PBF_CMDQ_PURE_LB_LINES		150
 
-#define PBF_CMDQ_LINES_E5_RSVD_RATIO	8
-
 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
 	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
 	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -140,6 +141,9 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
 	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
 
+/* Returns the VOQ line credit for the specified number of PBF command lines.
+ * PBF lines are specified in 256b units.
+ */
 #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
 	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
 
@@ -178,14 +182,14 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
 		  cmd ## _ ## field, \
 		  value)
 
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
 			  ext_voq, wrr) \
 	do { \
 		typeof(map) __map; \
 		memset(&__map, 0, sizeof(__map)); \
 		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
 		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
-			  rl_valid); \
+			  rl_valid ? 1 : 0);\
 		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
 			  vp_pq_id); \
 		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
@@ -200,9 +204,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
 #define WRITE_PQ_INFO_TO_RAM	1
 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
 	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
-	 ((rl_valid) << 22) | ((rl) << 24))
+	 ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
+	 (((rl) >> 8) << 9))
+
 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
-	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
+	XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+	XSTORM_PQ_INFO_OFFSET(pq_id)
 
 /******************** INTERNAL IMPLEMENTATION *********************/
 
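For illustration only (not part of the patch): the reworked PQ_INFO_ELEMENT() keeps the low 8 bits of the RL ID at bits 24-31 and moves bit 8 of the RL ID into bit 9, so a hypothetical reader of the RAM word would reassemble the field as:

	/* sketch; assumes the PQ_INFO_ELEMENT() bit layout above */
	static inline u16 pq_info_rl_id(u32 pq_info)
	{
		return ((pq_info >> 24) & 0xff) | (((pq_info >> 9) & 0x1) << 8);
	}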
@@ -228,9 +235,6 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
 			     (u32)voq_bit_mask);
-		if (num_ext_voqs >= 32)
-			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
-				     (u32)(voq_bit_mask >> 32));
 
 		/* Write RL period */
 		STORE_RT_REG(p_hwfn,
@@ -259,12 +263,12 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
 			     QM_WFQ_UPPER_BOUND);
 }
 
-/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
+/* Prepare global RL enable/disable runtime init values */
+static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
 {
 	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
-		     vport_rl_en ? 1 : 0);
-	if (vport_rl_en) {
+		     global_rl_en ? 1 : 0);
+	if (global_rl_en) {
 		/* Write RL period (use timer 0 only) */
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
@@ -331,8 +335,7 @@ static void qed_cmdq_lines_rt_init(
 			continue;
 
 		/* Find number of command queue lines to divide between the
-		 * active physical TCs. In E5, 1/8 of the lines are reserved.
-		 * the lines for pure LB TC are subtracted.
+		 * active physical TCs.
 		 */
 		phys_lines = port_params[port_id].num_pbf_cmd_lines;
 		phys_lines -= PBF_CMDQ_PURE_LB_LINES;
@@ -361,11 +364,30 @@ static void qed_cmdq_lines_rt_init(
 		ext_voq = qed_get_ext_voq(p_hwfn,
 					  port_id,
 					  PURE_LB_TC, max_phys_tcs_per_port);
-		qed_cmdq_lines_voq_rt_init(p_hwfn,
-					   ext_voq, PBF_CMDQ_PURE_LB_LINES);
+		qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+					   PBF_CMDQ_PURE_LB_LINES);
 	}
 }
 
+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space Is currently not used):
+ * 1. Parameters:
+ *    B - BTB blocks for this port
+ *    C - Number of physical TCs for this port
+ * 2. Calculation:
+ *    a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ *	 headroom.
+ *    b. B = B - 38 (remainder after global headroom allocation).
+ *    c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ *    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ *    e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space
+ *   is not enabled and allocated for each TC.
+ */
 static void qed_btb_blocks_rt_init(
 	struct qed_hwfn *p_hwfn,
 	u8 max_ports_per_engine,
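Working the BTB comment's calculation with illustrative numbers: for B = 1000 guaranteed blocks and C = 4 physical TCs, the global headroom leaves B = 1000 - 38 = 962; the pure LB VOQ then gets MAX(38, 962 / 4.7) ≈ 204 blocks; the remaining 962 - 204 = 758 blocks yield 758 / 4 = 189 blocks per physical TC (ignoring the fixed-point rounding the actual code applies).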
@@ -424,6 +446,34 @@ static void qed_btb_blocks_rt_init(
 	}
 }
 
+/* Prepare runtime init values for the specified RL.
+ * Set max link speed (100Gbps) per rate limiter.
+ * Return -1 on error.
+ */
+static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
+{
+	u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+			  (u32)QM_RL_CRD_REG_SIGN_BIT;
+	u32 inc_val;
+	u16 rl_id;
+
+	/* Go over all global RLs */
+	for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
+		inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);
+
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+			     (u32)QM_RL_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+			     upper_bound);
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
+	}
+
+	return 0;
+}
+
 /* Prepare Tx PQ mapping runtime init values for the specified PF */
 static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
 				  struct qed_ptt *p_ptt,
@@ -460,18 +510,17 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,

/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
u16 *p_first_tx_pq_id, vport_id_in_pf;
struct qm_rf_pq_map_e4 tx_pq_map;
bool is_vf_pq, rl_valid;
u16 *p_first_tx_pq_id;
u8 tc_id = pq_params[i].tc_id;
bool is_vf_pq;
u8 ext_voq;

ext_voq = qed_get_ext_voq(p_hwfn,
pq_params[i].port_id,
tc_id,
p_params->max_phys_tcs_per_port);
is_vf_pq = (i >= p_params->num_pf_pqs);
rl_valid = pq_params[i].rl_valid > 0;

/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;

@@ -492,21 +541,14 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
map_val);
}

/* Check RL ID */
if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration\n");
rl_valid = false;
}

/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
E4,
pq_id,
rl_valid ? 1 : 0,
*p_first_tx_pq_id,
rl_valid ? pq_params[i].vport_id : 0,
pq_params[i].rl_valid,
pq_params[i].rl_id,
ext_voq, pq_params[i].wrr_group);

/* Set PQ base address */

@@ -529,9 +571,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
p_params->pf_id,
tc_id,
pq_params[i].port_id,
rl_valid ? 1 : 0,
rl_valid ?
pq_params[i].vport_id : 0);
pq_params[i].rl_valid,
pq_params[i].rl_id);
qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
pq_info);
}
@@ -669,19 +710,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 num_vports,
u16 num_vports,
struct init_qm_vport_params *vport_params)
{
u16 vport_pq_id;
u16 vport_pq_id, i;
u32 inc_val;
u8 tc, i;
u8 tc;

/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
if (!vport_params[i].vport_wfq)
if (!vport_params[i].wfq)
continue;

inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
if (inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn,
"Invalid VPORT WFQ weight configuration\n");

@@ -706,48 +747,6 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}

/* Prepare VPORT RL runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
u32 link_speed,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
u32 inc_val;

if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration\n");
return -1;
}

/* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
vport_params[i].vport_rl :
link_speed);
if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn,
"Invalid VPORT rate-limit configuration\n");
return -1;
}

STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
QM_VP_RL_UPPER_BOUND(link_speed) |
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}

return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{

@@ -799,23 +798,20 @@ u32 qed_qm_pf_mem_size(u32 num_pf_cids,
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params)
{
/* Init AFullOprtnstcCrdMask */
u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
(QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
(p_params->pf_wfq_en <<
QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
(p_params->vport_wfq_en <<
QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
(p_params->pf_rl_en <<
QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
(p_params->vport_rl_en <<
QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
(QM_OPPOR_FW_STOP_DEF <<
QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
(QM_OPPOR_PQ_EMPTY_DEF <<
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
u32 mask = 0;

/* Init AFullOprtnstcCrdMask */
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
QM_OPPOR_LINE_VOQ_DEF);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
p_params->global_rl_en);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
SET_FIELD(mask,
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);

/* Enable/disable PF RL */

@@ -824,8 +820,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
/* Enable/disable PF WFQ */
qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);

/* Enable/disable VPORT RL */
qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
/* Enable/disable global RL */
qed_enable_global_rl(p_hwfn, p_params->global_rl_en);

/* Enable/disable VPORT WFQ */
qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);

@@ -842,6 +838,8 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
p_params->max_phys_tcs_per_port,
p_params->port_params);

qed_global_rl_rt_init(p_hwfn);

return 0;
}

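The rework above replaces open-coded shift-or expressions with SET_FIELD, which expands each field name into a _MASK/_SHIFT pair. A minimal sketch of that pattern (the mask and shift values here are illustrative, not the real QM register layout):

#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK	0x1
#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT	4	/* hypothetical bit position */

#define SET_FIELD(value, name, flag)				\
	do {							\
		(value) &= ~(name##_MASK << name##_SHIFT);	\
		(value) |= ((flag) & name##_MASK) << name##_SHIFT; \
	} while (0)

Each enable flag then sits next to its field name, which is what makes the vport_rl_en to global_rl_en switch visible in a single line of the diff.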
@@ -853,7 +851,9 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
p_params->num_tids) *
QM_OTHER_PQS_PER_PF;
u8 tc, i;
u16 i;
u8 tc;

/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)

@@ -878,16 +878,10 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;

/* Set VPORT WFQ */
/* Init VPORT WFQ */
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;

/* Set VPORT RL */
if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
p_params->num_vports, p_params->link_speed,
vport_params))
return -1;

return 0;
}

@@ -925,18 +919,19 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,

int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
u16 vport_pq_id;
u32 inc_val;
u8 tc;

inc_val = QM_WFQ_INC_VAL(vport_wfq);
inc_val = QM_WFQ_INC_VAL(wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration\n");
DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}

/* A VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID)

@@ -948,28 +943,20 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 vport_id, u32 vport_rl, u32 link_speed)
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
{
u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
u32 inc_val;

if (vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn,
"Invalid VPORT ID for rate limiter configuration\n");
inc_val = QM_RL_INC_VAL(rate_limit);
if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
return -1;
}

inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
return -1;
}

qed_wr(p_hwfn,
p_ptt,
QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
qed_wr(p_hwfn, p_ptt,
QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);

return 0;
}

@@ -1013,7 +1000,6 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}

#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
do { \
typeof(var) *__p_var = &(var); \

@@ -1021,8 +1007,59 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
*__p_var = (*__p_var & ~BIT(__offset)) | \
((enable) ? BIT(__offset) : 0); \
} while (0)
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832

#define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910
#define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910

#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
do { \
u32 i; \
\
for (i = 0; i < (arr_size); i++) \
qed_wr(dev, ptt, \
((addr) + (4 * i)), \
((u32 *)&(arr))[i]); \
} while (0)

/**
 * @brief qed_dmae_to_grc - is an internal function - writes from host to
 * wide-bus registers (split registers are not supported yet)
 *
 * @param p_hwfn - HW device data
 * @param p_ptt - ptt window used for writing the registers.
 * @param p_data - pointer to source data.
 * @param addr - Destination register address.
 * @param len_in_dwords - data length in DWORDS (u32)
 */
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_data, u32 addr, u32 len_in_dwords)
{
struct qed_dmae_params params = {};
int rc;

if (!p_data)
return -1;

/* Set DMAE params */
SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);

/* Execute DMAE command */
rc = qed_dmae_host2grc(p_hwfn, p_ptt,
(u64)(uintptr_t)(p_data),
addr, len_in_dwords, &params);

/* If the DMAE write failed, fall back to writing using GRC */
if (rc) {
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Failed writing to chip using DMAE, using GRC instead\n");
/* write to registers using GRC */
ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
}

return len_in_dwords;
}

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port)

@@ -1166,8 +1203,8 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}

#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872

void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)

@@ -1208,6 +1245,8 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,

void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
struct regpair ram_line = { };

/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1217,12 +1256,9 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);

/* Zero ramline */
qed_wr(p_hwfn,
p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
qed_wr(p_hwfn,
p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
0);
qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
sizeof(ram_line) / REG_SIZE);
}

void qed_gft_config(struct qed_hwfn *p_hwfn,
@@ -1232,7 +1268,8 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool udp,
bool ipv4, bool ipv6, enum gft_profile_type profile_type)
{
u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
u32 reg_val, cam_line, search_non_ip_as_gft;
struct regpair ram_line = { };

if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn,

@@ -1298,35 +1335,33 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);

/* Write line to RAM - compare to filter 4 tuple */
ram_line_lo = 0;
ram_line_hi = 0;

/* Search no IP as GFT */
search_non_ip_as_gft = 0;

/* Tunnel type */
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);

if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);

/* Allow tunneled traffic without inner IP */
search_non_ip_as_gft = 1;

@@ -1334,24 +1369,17 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,

qed_wr(p_hwfn,
p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
qed_wr(p_hwfn,
p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
ram_line_lo);
qed_wr(p_hwfn,
p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
ram_line_hi);
qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
sizeof(ram_line) / REG_SIZE);

/* Set default profile so that no filter match will happen */
qed_wr(p_hwfn,
p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
qed_wr(p_hwfn,
p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
ram_line.lo = 0xffffffff;
ram_line.hi = 0x3ff;
qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
PRS_GFT_CAM_LINES_NO_MATCH,
sizeof(ram_line) / REG_SIZE);

/* Enable gft search */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);

@@ -1544,3 +1572,144 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
}
}

#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4)
#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)

static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
{
switch (storm_id) {
case 0:
return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
TSTORM_OVERLAY_BUF_ADDR_OFFSET;
case 1:
return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
MSTORM_OVERLAY_BUF_ADDR_OFFSET;
case 2:
return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
USTORM_OVERLAY_BUF_ADDR_OFFSET;
case 3:
return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
XSTORM_OVERLAY_BUF_ADDR_OFFSET;
case 4:
return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
YSTORM_OVERLAY_BUF_ADDR_OFFSET;
case 5:
return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
PSTORM_OVERLAY_BUF_ADDR_OFFSET;

default:
return 0;
}
}

struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
const u32 * const
fw_overlay_in_buf,
u32 buf_size_in_bytes)
{
u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
struct phys_mem_desc *allocated_mem;

if (!buf_size)
return NULL;

allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
GFP_KERNEL);
if (!allocated_mem)
return NULL;

memset(allocated_mem, 0, NUM_STORMS * sizeof(struct phys_mem_desc));

/* For each Storm, set physical address in RAM */
while (buf_offset < buf_size) {
struct phys_mem_desc *storm_mem_desc;
struct fw_overlay_buf_hdr *hdr;
u32 storm_buf_size;
u8 storm_id;

hdr =
(struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
storm_buf_size = GET_FIELD(hdr->data,
FW_OVERLAY_BUF_HDR_BUF_SIZE);
storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
storm_mem_desc = allocated_mem + storm_id;
storm_mem_desc->size = storm_buf_size * sizeof(u32);

/* Allocate physical memory for Storm's overlays buffer */
storm_mem_desc->virt_addr =
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
storm_mem_desc->size,
&storm_mem_desc->phys_addr, GFP_KERNEL);
if (!storm_mem_desc->virt_addr)
break;

/* Skip overlays buffer header */
buf_offset += OVERLAY_HDR_SIZE_DWORDS;

/* Copy Storm's overlays buffer to allocated memory */
memcpy(storm_mem_desc->virt_addr,
&fw_overlay_in_buf[buf_offset], storm_mem_desc->size);

/* Advance to next Storm */
buf_offset += storm_buf_size;
}

/* If memory allocation has failed, free all allocated memory */
if (buf_offset < buf_size) {
qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
return NULL;
}

return allocated_mem;
}

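The input buffer walked by qed_fw_overlay_mem_alloc() is self-describing; a sketch of the layout the loop assumes (the exact bit encoding of the header lives in the FW_OVERLAY_BUF_HDR_* masks and is not repeated here):

/*
 * fw_overlay_in_buf layout, in dwords:
 *
 *   [ hdr: storm_id + buf_size ][ buf_size dwords of overlay data ]
 *   [ hdr: storm_id + buf_size ][ buf_size dwords of overlay data ]
 *   ...
 *
 * buf_offset advances by OVERLAY_HDR_SIZE_DWORDS plus the payload size
 * per section. A dma_alloc_coherent() failure breaks out early, and the
 * final buf_offset < buf_size check detects that and frees any partial
 * allocations.
 */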
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct phys_mem_desc *fw_overlay_mem)
{
u8 storm_id;

for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
struct phys_mem_desc *storm_mem_desc =
(struct phys_mem_desc *)fw_overlay_mem + storm_id;
u32 ram_addr, i;

/* Skip Storms with no FW overlays */
if (!storm_mem_desc->virt_addr)
continue;

/* Calculate overlay RAM GRC address of current PF */
ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
sizeof(dma_addr_t) * p_hwfn->rel_pf_id;

/* Write Storm's overlay physical address to RAM */
for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
qed_wr(p_hwfn, p_ptt, ram_addr,
((u32 *)&storm_mem_desc->phys_addr)[i]);
}
}

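One detail in the loop above worth calling out is how the DMA address reaches the Storm RAM:

/* Each PF owns a sizeof(dma_addr_t)-wide slot at
 * base + sizeof(dma_addr_t) * rel_pf_id, and the address is emitted as
 * PHYS_ADDR_DWORDS consecutive qed_wr() calls (two for a 64-bit
 * dma_addr_t), taking the u32 halves in host memory order.
 */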
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
struct phys_mem_desc *fw_overlay_mem)
{
u8 storm_id;

if (!fw_overlay_mem)
return;

for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
struct phys_mem_desc *storm_mem_desc =
(struct phys_mem_desc *)fw_overlay_mem + storm_id;

/* Free Storm's physical memory */
if (storm_mem_desc->virt_addr)
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
storm_mem_desc->size,
storm_mem_desc->virt_addr,
storm_mem_desc->phys_addr);
}

/* Free allocated virtual memory */
kfree(fw_overlay_mem);
}

@@ -54,15 +54,15 @@ static u32 pxp_global_win[] = {
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
0,
0,
0,
0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
0,
0,
0,
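The window comments encode a simple relationship: each non-zero table entry is the 4 KiB page number of the GRC address it maps, i.e. addr = entry << 12:

/* 0x1d02 << 12 == 0x1d02000  (new win 6)
 * 0x1c08 << 12 == 0x1c08000  (new win 14)
 * A zero entry leaves the corresponding window unmapped.
 */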
@@ -74,15 +74,6 @@ void qed_init_iro_array(struct qed_dev *cdev)
cdev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
int i;

for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
p_hwfn->rt_data.b_valid[i] = false;
}

void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;

@@ -106,7 +97,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
u16 i, segment;
u16 i, j, segment;
int rc = 0;

/* Since not all RT entries are initialized, go over the RT and

@@ -121,6 +112,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
 */
if (!b_must_dmae) {
qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
p_valid[i] = false;
continue;
}

@@ -135,6 +127,10 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
if (rc)
return rc;

/* invalidate after writing */
for (j = i; j < i + segment; j++)
p_valid[j] = false;

/* Jump over the entire segment, including invalid entry */
i += segment;
}

@@ -215,7 +211,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
 * 3. p_hwfn->temp_data,
 * 4. fill_count
 */
params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
addr, fill_count, &params);

@@ -490,10 +486,10 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
bool b_dmae = (phase != PHASE_ENGINE);
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
int rc = 0;

num_init_ops = cdev->fw_data->init_ops_size;

@@ -522,7 +518,6 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from

@@ -533,6 +528,9 @@ int qed_init_run(struct qed_hwfn *p_hwfn,

case INIT_OP_CALLBACK:
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
if (phase == PHASE_ENGINE &&
cmd->callback.callback_id == DMAE_READY_CB)
b_dmae = true;
break;
}

@@ -587,5 +585,10 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);

offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
fw->fw_overlays = (u32 *)(data + offset);
len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
fw->fw_overlays_len = len;

return 0;
}

@@ -80,14 +80,6 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn);
 */
void qed_init_free(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_init_clear_rt_data - Clears the runtime init array.
 *
 *
 * @param p_hwfn
 */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
 *

@@ -204,17 +204,14 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
return -EINVAL;
}

SET_FIELD(p_init->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;

val = p_params->half_way_close_timeout;
p_init->half_way_close_timeout = cpu_to_le16(val);
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id;
p_init->ll2_rx_queue_id =
p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] +
p_params->ll2_ooo_queue_id;

p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;

@@ -331,12 +328,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_conn->physical_q1 = cpu_to_le16(physical_q);
p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);

p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
p_conn->layer_code);

p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);

DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);

@@ -492,12 +484,8 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
return rc;

p_ramrod = &p_ent->ramrod.iscsi_conn_update;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);

p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->flags = p_conn->update_flag;
p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
dval = p_conn->max_recv_pdu_length;

@@ -537,12 +525,8 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
return rc;

p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);

p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
ucval = p_conn->remote_mac[1];
((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
ucval = p_conn->remote_mac[0];

@@ -583,12 +567,8 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
return rc;

p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);

p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->abortive = p_conn->abortive_dsconnect;

DMA_REGPAIR_LE(p_ramrod->query_params_addr,

@@ -603,7 +583,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_slow_path_hdr *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;

@@ -621,11 +600,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
if (rc)
return rc;

p_ramrod = &p_ent->ramrod.iscsi_empty;
p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
SET_FIELD(p_ramrod->flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);

return qed_spq_post(p_hwfn, p_ent, NULL);
}

@@ -633,7 +607,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_spe_func_dstry *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = 0;

@@ -651,9 +624,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
if (rc)
return rc;

p_ramrod = &p_ent->ramrod.iscsi_destroy;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;

rc = qed_spq_post(p_hwfn, p_ent, NULL);

qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);

@@ -137,8 +137,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod)
{
p_ramrod->iwarp.ll2_ooo_q_index =
RESC_START(p_hwfn, QED_LL2_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;

p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;

@@ -2651,6 +2651,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,

memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
/* SYN will use ctx based queues */
data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;

@@ -2683,6 +2685,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,

/* Start OOO connection */
data.input.conn_type = QED_LL2_TYPE_OOO;
/* OOO/unaligned will use legacy ll2 queues (ram based) */
data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
data.input.mtu = params->max_mtu;

n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /

@@ -962,7 +962,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;

p_ramrod = &p_ent->ramrod.core_rx_queue_start;

memset(p_ramrod, 0, sizeof(*p_ramrod));
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;

@@ -996,6 +996,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,

p_ramrod->action_on_error.error_type = action_on_error;
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
p_ramrod->zero_prod_flg = 1;

return qed_spq_post(p_hwfn, p_ent, NULL);
}

@@ -1317,6 +1319,25 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
return 0;
}

static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
struct qed_ll2_acquire_data *data,
u8 *start_idx, u8 *last_idx)
{
/* LL2 queues handles will be split as follows:
 * First will be the legacy queues, and then the ctx based.
 */
if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
*start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
*last_idx = *start_idx +
QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
} else {
/* QED_LL2_RX_TYPE_CTX */
*start_idx = QED_LL2_CTX_CONN_BASE_PF;
*last_idx = *start_idx +
QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
}
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{

@@ -1337,14 +1358,16 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL;
u8 i, *p_tx_max;
u8 i, first_idx, last_idx, *p_tx_max;
int rc;

if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
return -EINVAL;

_qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);

/* Find a free connection to be used */
for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
for (i = first_idx; i < last_idx; i++) {
mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
if (p_hwfn->p_ll2_info[i].b_active) {
mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);

@@ -1448,6 +1471,7 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
enum qed_ll2_error_handle error_input;
enum core_error_handle error_mode;
u8 action_on_error = 0;
int rc;

if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;

@@ -1461,7 +1485,18 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
if (rc)
return rc;

if (p_ll2_conn->rx_queue.ctx_based) {
rc = qed_db_recovery_add(p_hwfn->cdev,
p_ll2_conn->rx_queue.set_prod_addr,
&p_ll2_conn->rx_queue.db_data,
DB_REC_WIDTH_64B, DB_REC_KERNEL);
}

return rc;
}

static void

@@ -1475,13 +1510,41 @@ qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
u8 handle,
u8 ll2_queue_type)
{
u8 qid;

if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;

/* QED_LL2_RX_TYPE_CTX
 * FW distinguishes between the legacy queues (ram based) and the
 * ctx based queues by the queue_id.
 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
 * and the queue ids above that are ctx based.
 */
qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
MAX_NUM_LL2_RX_RAM_QUEUES;

/* See comment on the acquire connection for how the ll2
 * queues handles are divided.
 */
qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);

return qid;
}

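A worked example of the mapping, assuming hypothetical resource starts of 0 for both QED_LL2_RAM_QUEUE and QED_LL2_CTX_QUEUE on this PF:

/* handle 0..2 (legacy):  qid = 0 + handle -> 0, 1, 2
 * handle 3 (ctx based):  qid = 0 + MAX_NUM_LL2_RX_RAM_QUEUES (32)
 *                              + (3 - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3))
 *                            = 32, the first queue id the FW treats as
 *                              ctx based.
 */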
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn;
struct e4_core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_info *p_ll2_conn;
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
struct qed_cxt_info cxt_info;
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;

@@ -1539,13 +1602,46 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
if (rc)
goto out;
cxt_info.iid = p_ll2_conn->cid;
rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc) {
DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
p_ll2_conn->cid);
goto out;
}

qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
p_cxt = cxt_info.p_cxt;

memset(p_cxt, 0, sizeof(*p_cxt));

qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
p_ll2_conn->input.rx_conn_type);
p_ll2_conn->queue_id = qid;
p_ll2_conn->tx_stats_id = qid;
p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_LL2_RX_PRODS_OFFSET(qid);

DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);

if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
p_rx->set_prod_addr = p_hwfn->regview +
GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
} else {
/* QED_LL2_RX_TYPE_CTX - using doorbell */
p_rx->ctx_based = 1;

p_rx->set_prod_addr = p_hwfn->doorbells +
p_hwfn->dpi_start_offset +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);

/* prepare db data */
p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
SET_FIELD(p_rx->db_data.params,
CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
SET_FIELD(p_rx->db_data.params,
CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
}

p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(p_ll2_conn->cid,
DQ_DEMS_LEGACY);

@@ -1556,7 +1652,6 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
DQ_XCM_CORE_TX_BD_PROD_CMD);
p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
goto out;

@@ -1590,7 +1685,7 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
struct qed_ll2_rx_packet *p_curp)
{
struct qed_ll2_rx_packet *p_posting_packet = NULL;
struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
struct core_ll2_rx_prod rx_prod = { 0, 0 };
bool b_notify_fw = false;
u16 bd_prod, cq_prod;

@@ -1615,13 +1710,27 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,

bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
if (p_rx->ctx_based) {
/* update producer by giving a doorbell */
p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
/* Make sure chain element is updated before ringing the
 * doorbell
 */
dma_wmb();
DIRECT_REG_WR64(p_rx->set_prod_addr,
*((u64 *)&p_rx->db_data));
} else {
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);

/* Make sure chain element is updated before ringing the doorbell */
dma_wmb();
/* Make sure chain element is updated before ringing the
 * doorbell
 */
dma_wmb();

DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
}

int qed_ll2_post_rx_buffer(void *cxt,

@@ -1965,6 +2074,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
p_ll2_conn->rx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */

if (p_ll2_conn->rx_queue.ctx_based)
qed_db_recovery_del(p_hwfn->cdev,
p_ll2_conn->rx_queue.set_prod_addr,
&p_ll2_conn->rx_queue.db_data);

rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;

@@ -46,6 +46,18 @@
#include "qed_sp.h"

#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
/* LL2 queues handles will be split as follows:
 * first will be legacy queues, and then the ctx based queues.
 */
#define QED_MAX_NUM_OF_LL2_CONNS_PF (4)
#define QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3)

#define QED_MAX_NUM_OF_CTX_LL2_CONNS_PF \
(QED_MAX_NUM_OF_LL2_CONNS_PF - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF)

#define QED_LL2_LEGACY_CONN_BASE_PF 0
#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF

struct qed_ll2_rx_packet {
struct list_head list_entry;

@@ -79,6 +91,7 @@ struct qed_ll2_rx_queue {
struct qed_chain rxq_chain;
struct qed_chain rcq_chain;
u8 rx_sb_index;
u8 ctx_based;
bool b_cb_registered;
__le16 *p_fw_cons;
struct list_head active_descq;

@@ -86,6 +99,7 @@ struct qed_ll2_rx_queue {
struct list_head posting_descq;
struct qed_ll2_rx_packet *descq_array;
void __iomem *set_prod_addr;
struct core_pwm_prod_update_data db_data;
};

struct qed_ll2_tx_queue {

@@ -2637,7 +2637,7 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
if (!ptt)
return -EAGAIN;

rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
rc = qed_dbg_grc_config(hwfn, cfg_id, val);

qed_ptt_release(hwfn, ptt);

@@ -48,6 +48,8 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define GRCBASE_MCP 0xe00000

#define QED_MCP_RESP_ITER_US 10

#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */

@@ -3165,6 +3167,9 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
case QED_NVM_IMAGE_FCOE_CFG:
type = NVM_TYPE_FCOE_CFG;
break;
case QED_NVM_IMAGE_MDUMP:
type = NVM_TYPE_MDUMP;
break;
case QED_NVM_IMAGE_NVM_CFG1:
type = NVM_TYPE_NVM_CFG1;
break;

@@ -3261,9 +3266,12 @@ static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
case QED_ILT:
mfw_res_id = RESOURCE_ILT_E;
break;
case QED_LL2_QUEUE:
case QED_LL2_RAM_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E;
break;
case QED_LL2_CTX_QUEUE:
mfw_res_id = RESOURCE_LL2_CQS_E;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */

@@ -178,6 +178,8 @@
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
#define MCP_REG_SCRATCH_SIZE \
57344
#define CNIG_REG_NW_PORT_MODE_BB \
0x218200UL
#define MISCS_REG_CHIP_NUM \

@@ -212,6 +214,8 @@
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
#define DBG_REG_TIMESTAMP_VALID_EN \
0x010b58UL
#define DMAE_REG_INIT \
0x00c000UL
#define DORQ_REG_IFEN \

@@ -350,6 +354,10 @@
0x24000cUL
#define PSWRQ2_REG_ILT_MEMORY \
0x260000UL
#define PSWRQ2_REG_ILT_MEMORY_SIZE_BB \
15200
#define PSWRQ2_REG_ILT_MEMORY_SIZE_K2 \
22000
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \

@@ -1453,6 +1461,8 @@
0x1401404UL
#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1401408UL
#define XSEM_REG_DBG_GPRE_VECT \
0x1401410UL
#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \

@@ -1465,6 +1475,8 @@
0x1501404UL
#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1501408UL
#define YSEM_REG_DBG_GPRE_VECT \
0x1501410UL
#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \

@@ -1479,6 +1491,8 @@
0x1601404UL
#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1601408UL
#define PSEM_REG_DBG_GPRE_VECT \
0x1601410UL
#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \

@@ -1493,6 +1507,8 @@
0x1701404UL
#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1701408UL
#define TSEM_REG_DBG_GPRE_VECT \
0x1701410UL
#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \

@@ -1507,12 +1523,16 @@
0x1801404UL
#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1801408UL
#define MSEM_REG_DBG_GPRE_VECT \
0x1801410UL
#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1901140UL
#define SEM_FAST_REG_INT_RAM_SIZE \
20480
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \

@@ -1521,14 +1541,26 @@
0x1901404UL
#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1901408UL
#define USEM_REG_DBG_GPRE_VECT \
0x1901410UL
#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
0x000748UL
#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \
0x00074cUL
#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
0x000750UL
#define SEM_FAST_REG_DEBUG_ACTIVE \
0x000740UL
#define SEM_FAST_REG_INT_RAM \
0x020000UL
#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
20480
#define SEM_FAST_REG_RECORD_FILTER_ENABLE \
0x000768UL
#define GRC_REG_TRACE_FIFO_VALID_DATA \
0x050064UL
#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \

@@ -1583,14 +1615,20 @@
0x181530UL
#define DBG_REG_DBG_BLOCK_ON \
0x010454UL
#define DBG_REG_FILTER_ENABLE \
0x0109d0UL
#define DBG_REG_FRAMING_MODE \
0x010058UL
#define DBG_REG_TRIGGER_ENABLE \
0x01054cUL
#define SEM_FAST_REG_VFC_DATA_WR \
0x000b40UL
#define SEM_FAST_REG_VFC_ADDR \
0x000b44UL
#define SEM_FAST_REG_VFC_DATA_RD \
0x000b48UL
#define SEM_FAST_REG_VFC_STATUS \
0x000b4cUL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
#define RSS_REG_RSS_RAM_DATA_SIZE \

@@ -900,7 +900,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
goto err_resp;

out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),

@@ -120,9 +120,7 @@ union ramrod_data {
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
struct fcoe_stat_ramrod_params fcoe_stat;

struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;

@@ -352,7 +352,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,

/* propagate bulletin board via dmae to vm memory */
memset(&params, 0, sizeof(params));
params.flags = QED_DMAE_FLAG_VF_DST;
SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = p_vf->abs_vf_id;
return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
p_vf->vf_bulletin, p_vf->bulletin.size / 4,

@@ -1225,8 +1225,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,

eng_vf_id = p_vf->abs_vf_id;

memset(&params, 0, sizeof(struct qed_dmae_params));
params.flags = QED_DMAE_FLAG_VF_DST;
memset(&params, 0, sizeof(params));
SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = eng_vf_id;

qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),

@@ -4103,8 +4103,9 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
if (!vf_info)
return -EINVAL;

memset(&params, 0, sizeof(struct qed_dmae_params));
params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
memset(&params, 0, sizeof(params));
SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
params.src_vfid = vf_info->abs_vf_id;

if (qed_dmae_host2host(p_hwfn, ptt,

@@ -4354,9 +4355,9 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
u16 rl_id;
int rc;

vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);

@@ -4367,10 +4368,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;

p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;

return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
p_link->speed);
rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}

static int

@@ -848,13 +848,13 @@ static void qede_tpa_start(struct qede_dev *edev,
qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
if (likely(cqe->ext_bd_len_list[0]))
if (likely(cqe->bw_ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->ext_bd_len_list[0]));
le16_to_cpu(cqe->bw_ext_bd_len_list[0]));

if (unlikely(cqe->ext_bd_len_list[1])) {
if (unlikely(cqe->bw_ext_bd_len_list[1])) {
DP_ERR(edev,
"Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
"Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
tpa_info->state = QEDE_AGG_STATE_ERROR;
}
}

@@ -76,7 +76,6 @@

#define FW_ASSERT_GENERAL_ATTN_IDX 32

#define MAX_PINNED_CCFC 32

/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8

@@ -105,12 +104,19 @@

#define CORE_SPQE_PAGE_SIZE_BYTES 4096

#define MAX_NUM_LL2_RX_QUEUES 48
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
/* Number of LL2 RAM based queues */
#define MAX_NUM_LL2_RX_RAM_QUEUES 32

/* Number of LL2 context based queues */
#define MAX_NUM_LL2_RX_CTX_QUEUES 208
#define MAX_NUM_LL2_RX_QUEUES \
(MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES)

#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
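A quick sanity check of the new split (a standalone sketch, not driver code):

#include <assert.h>

#define MAX_NUM_LL2_RX_RAM_QUEUES 32
#define MAX_NUM_LL2_RX_CTX_QUEUES 208
#define MAX_NUM_LL2_RX_QUEUES \
	(MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES)

/* 32 RAM based + 208 context based = 240 RX queues, up from the
 * previous flat 48.
 */
static_assert(MAX_NUM_LL2_RX_QUEUES == 240, "LL2 RX queue split");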
|
||||
#define FW_MAJOR_VERSION 8
|
||||
#define FW_MINOR_VERSION 37
|
||||
#define FW_REVISION_VERSION 7
|
||||
#define FW_MINOR_VERSION 42
|
||||
#define FW_REVISION_VERSION 2
|
||||
#define FW_ENGINEERING_VERSION 0
|
||||
|
||||
/***********************/
|
||||
|
@ -132,10 +138,10 @@
|
|||
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
|
||||
|
||||
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
|
||||
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
|
||||
|
||||
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
|
||||
#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
|
||||
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
|
||||
#define MAX_NUM_FUNCTIONS (MAX_FUNCTION_NUMBER_K2)
|
||||
|
||||
#define MAX_NUM_VPORTS_K2 (208)
|
||||
#define MAX_NUM_VPORTS_BB (160)
|
||||
|
@ -222,6 +228,7 @@
|
|||
#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
|
||||
#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
|
||||
#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
|
||||
#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5
|
||||
|
||||
/* UCM agg val selection (HW) */
|
||||
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
|
||||
|
@ -340,6 +347,10 @@
|
|||
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
|
||||
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
|
||||
|
||||
/* DQ_DEMS_AGG_VAL_BASE */
|
||||
#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
|
||||
(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
|
||||
|
||||
#define DQ_REGION_SHIFT (12)
|
||||
|
||||
/* DPM */
|
||||
|
@ -395,6 +406,7 @@
|
|||
|
||||
/* Number of Protocol Indices per Status Block */
|
||||
#define PIS_PER_SB_E4 12
|
||||
#define MAX_PIS_PER_SB PIS_PER_SB
|
||||
|
||||
#define CAU_HC_STOPPED_STATE 3
|
||||
#define CAU_HC_DISABLE_STATE 4
|
||||
|
@ -425,8 +437,6 @@
|
|||
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
|
||||
|
||||
#define IGU_CMD_INT_ACK_BASE 0x0400
|
||||
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
|
||||
MAX_TOT_SB_PER_PATH - 1)
|
||||
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
|
||||
|
||||
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
|
||||
|
@@ -439,8 +449,6 @@
 #define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
 
 #define IGU_CMD_PROD_UPD_BASE 0x0600
-#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
-				 MAX_TOT_SB_PER_PATH - 1)
 #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
 
 /*****************/
@@ -652,8 +660,8 @@
 #define PBF_MAX_CMD_LINES 3328
 
 /* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
-
+#define BTB_MAX_BLOCKS_BB 1440
+#define BTB_MAX_BLOCKS_K2 1840
 /*****************/
 /* PRS CONSTANTS */
 /*****************/
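BTB capacity is now chip specific: BB keeps 1440 blocks, K2 gets 1840. Code that used the old single BTB_MAX_BLOCKS now has to choose per device family; a hypothetical selector (the in-tree driver routes this through its own per-chip lookup):

static inline u32 btb_max_blocks(bool is_bb)
{
	return is_bb ? BTB_MAX_BLOCKS_BB : BTB_MAX_BLOCKS_K2;
}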
@@ -730,6 +738,8 @@ enum protocol_type {
 	PROTOCOLID_PREROCE,
 	PROTOCOLID_COMMON,
 	PROTOCOLID_RESERVED1,
+	PROTOCOLID_RDMA,
+	PROTOCOLID_SCSI,
 	MAX_PROTOCOL_TYPE
 };
 
@@ -750,6 +760,10 @@ union rdma_eqe_data {
 	struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
 };
 
+struct tstorm_queue_zone {
+	__le32 reserved[2];
+};
+
 /* Ustorm Queue Zone */
 struct ustorm_eth_queue_zone {
 	struct coalescing_timeset int_coalescing_timeset;
@@ -872,8 +886,8 @@ struct db_l2_dpm_data {
 #define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
 #define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
 #define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
-#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK 0x1
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31
 };
 
 /* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -38,9 +38,11 @@
 /********************/
 
 #define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 10
+#define ETH_HSI_VER_MINOR 11
 
 #define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+/* Maximum number of pinned L2 connections (CIDs) */
+#define ETH_PINNED_CONN_MAX_NUM 32
 
 #define ETH_CACHE_LINE_SIZE 64
 #define ETH_RX_CQE_GAP 32
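The ETH_HSI_VER_MINOR bump from 10 to 11 is what gates the new CQE and BD layouts in this header; the major version is unchanged. A sketch of the usual tolerant handshake, under the assumption (not spelled out here) that equal majors with an older minor stay compatible:

static bool eth_hsi_compatible(u8 peer_major, u8 peer_minor)
{
	if (peer_major != ETH_HSI_VER_MAJOR)
		return false;	/* layout change: hard incompatibility */

	return peer_minor <= ETH_HSI_VER_MINOR; /* older minors tolerated */
}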
@@ -61,6 +63,7 @@
 #define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
 #define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
 #define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4
 #define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
 #define ETH_TX_MAX_LSO_HDR_BYTES 510
 #define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
@@ -75,9 +78,8 @@
 #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
 	(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
-
 /* Maximum number of buffers, used for RX packet placement */
 #define ETH_RX_MAX_BUFF_PER_PKT 5
-#define ETH_RX_BD_THRESHOLD 12
+#define ETH_RX_BD_THRESHOLD 16
 
 /* Num of MAC/VLAN filters */
 #define ETH_NUM_MAC_FILTERS 512
@@ -96,24 +98,24 @@
 #define ETH_RSS_ENGINE_NUM_BB 127
 
 /* TPA constants */
 #define ETH_TPA_MAX_AGGS_NUM 64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2
 #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
 #define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
 
 /* Control frame check constants */
 #define ETH_CTL_FRAME_ETH_TYPE_NUM 4
 
+/* GFS constants */
+#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
+
 /* Destination port mode */
-enum dest_port_mode {
-	DEST_PORT_PHY,
-	DEST_PORT_LOOPBACK,
-	DEST_PORT_PHY_LOOPBACK,
-	DEST_PORT_DROP,
-	MAX_DEST_PORT_MODE
+enum dst_port_mode {
+	DST_PORT_PHY,
+	DST_PORT_LOOPBACK,
+	DST_PORT_PHY_LOOPBACK,
+	DST_PORT_DROP,
+	MAX_DST_PORT_MODE
 };
 
 /* Ethernet address type */
@@ -167,8 +169,8 @@ struct eth_tx_data_2nd_bd {
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6
 #define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
 #define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
 #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
@@ -244,8 +246,9 @@ struct eth_fast_path_rx_reg_cqe {
 	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 bd_num;
 	u8 reserved;
-	__le16 flow_id;
-	u8 reserved1[11];
+	__le16 reserved2;
+	__le32 flow_id_or_resource_id;
+	u8 reserved1[7];
 	struct eth_pmd_flow_flags pmd_flags;
 };
@@ -296,9 +299,10 @@ struct eth_fast_path_rx_tpa_start_cqe {
 	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 tpa_agg_index;
 	u8 header_len;
-	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
-	__le16 flow_id;
-	u8 reserved;
+	__le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
+	__le16 reserved2;
+	__le32 flow_id_or_resource_id;
+	u8 reserved[3];
 	struct eth_pmd_flow_flags pmd_flags;
 };
@@ -407,6 +411,29 @@ struct eth_tx_3rd_bd {
 	struct eth_tx_data_3rd_bd data;
 };
 
+/* The parsing information data for the forth tx bd of a given packet. */
+struct eth_tx_data_4th_bd {
+	u8 dst_vport_id;
+	u8 reserved4;
+	__le16 bitfields;
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
+#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1
+#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9
+	__le16 reserved3;
+};
+
+/* The forth tx bd of a given packet */
+struct eth_tx_4th_bd {
+	struct regpair addr; /* Single continuous buffer */
+	__le16 nbytes; /* Number of bytes in this BD */
+	struct eth_tx_data_4th_bd data; /* Parsing information data */
+};
+
 /* Complementary information for the regular tx bd of a given packet */
 struct eth_tx_data_bd {
 	__le16 reserved0;
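This fourth BD is what the new ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING (4) minimum accounts for: a packet that requests VPORT forwarding supplies the destination vport in BD number four. A minimal fill sketch, assuming the kernel's upper/lower_32_bits and SET_FIELD helpers and a caller that already built BDs one through three:

static void fill_4th_bd(struct eth_tx_4th_bd *bd, dma_addr_t phys, u16 len,
			u8 dst_vport)
{
	u16 bf = 0;

	bd->addr.hi = cpu_to_le32(upper_32_bits(phys));
	bd->addr.lo = cpu_to_le32(lower_32_bits(phys));
	bd->nbytes = cpu_to_le16(len);

	bd->data.dst_vport_id = dst_vport;
	SET_FIELD(bf, ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID, 1);
	bd->data.bitfields = cpu_to_le16(bf);
}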
@@ -431,6 +458,7 @@ union eth_tx_bd_types {
 	struct eth_tx_1st_bd first_bd;
 	struct eth_tx_2nd_bd second_bd;
 	struct eth_tx_3rd_bd third_bd;
+	struct eth_tx_4th_bd fourth_bd;
 	struct eth_tx_bd reg_bd;
 };
@@ -443,6 +471,12 @@ enum eth_tx_tunn_type {
 	MAX_ETH_TX_TUNN_TYPE
 };
 
+/* Mstorm Queue Zone */
+struct mstorm_eth_queue_zone {
+	struct eth_rx_prod_data rx_producers;
+	__le32 reserved[3];
+};
+
 /* Ystorm Queue Zone */
 struct xstorm_eth_queue_zone {
 	struct coalescing_timeset int_coalescing_timeset;
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -999,7 +999,6 @@ struct iscsi_conn_offload_params {
 	struct regpair r2tq_pbl_addr;
 	struct regpair xhq_pbl_addr;
 	struct regpair uhq_pbl_addr;
-	__le32 initial_ack;
 	__le16 physical_q0;
 	__le16 physical_q1;
 	u8 flags;
@@ -1011,10 +1010,10 @@ struct iscsi_conn_offload_params {
 #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
 #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
 #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
 	u8 pbl_page_size_log;
 	u8 pbe_page_size_log;
 	u8 default_cq;
 	__le16 reserved0;
 	__le32 stat_sn;
+	__le32 initial_ack;
 };
 
 /* iSCSI connection statistics */
@@ -1029,25 +1028,14 @@ struct iscsi_conn_stats_params {
 	__le32 reserved;
 };
 
-/* spe message header */
-struct iscsi_slow_path_hdr {
-	u8 op_code;
-	u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
-};
-
 /* iSCSI connection update params passed by driver to FW in ISCSI update
  *ramrod.
  */
 struct iscsi_conn_update_ramrod_params {
-	struct iscsi_slow_path_hdr hdr;
+	__le16 reserved0;
 	__le16 conn_id;
-	__le32 fw_cid;
+	__le32 reserved1;
 	u8 flags;
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
@@ -1065,7 +1053,7 @@ struct iscsi_conn_update_ramrod_params {
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
-	u8 reserved0[3];
+	u8 reserved3[3];
 	__le32 max_seq_size;
 	__le32 max_send_pdu_length;
 	__le32 max_recv_pdu_length;
@@ -1251,22 +1239,22 @@ enum iscsi_ramrod_cmd_id {
 
 /* iSCSI connection termination request */
 struct iscsi_spe_conn_mac_update {
-	struct iscsi_slow_path_hdr hdr;
+	__le16 reserved0;
 	__le16 conn_id;
-	__le32 fw_cid;
+	__le32 reserved1;
 	__le16 remote_mac_addr_lo;
 	__le16 remote_mac_addr_mid;
 	__le16 remote_mac_addr_hi;
-	u8 reserved0[2];
+	u8 reserved2[2];
 };
 
 /* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
  * iSCSI offload ramrod.
  */
 struct iscsi_spe_conn_offload {
-	struct iscsi_slow_path_hdr hdr;
+	__le16 reserved0;
 	__le16 conn_id;
-	__le32 fw_cid;
+	__le32 reserved1;
 	struct iscsi_conn_offload_params iscsi;
 	struct tcp_offload_params tcp;
 };
@@ -1275,44 +1263,36 @@ struct iscsi_spe_conn_offload {
  * iSCSI offload ramrod.
  */
 struct iscsi_spe_conn_offload_option2 {
-	struct iscsi_slow_path_hdr hdr;
+	__le16 reserved0;
 	__le16 conn_id;
-	__le32 fw_cid;
+	__le32 reserved1;
 	struct iscsi_conn_offload_params iscsi;
 	struct tcp_offload_params_opt2 tcp;
 };
 
 /* iSCSI collect connection statistics request */
 struct iscsi_spe_conn_statistics {
-	struct iscsi_slow_path_hdr hdr;
+	__le16 reserved0;
 	__le16 conn_id;
-	__le32 fw_cid;
+	__le32 reserved1;
 	u8 reset_stats;
-	u8 reserved0[7];
+	u8 reserved2[7];
 	struct regpair stats_cnts_addr;
 };
 
 /* iSCSI connection termination request */
 struct iscsi_spe_conn_termination {
-	struct iscsi_slow_path_hdr hdr;
+	__le16 reserved0;
 	__le16 conn_id;
-	__le32 fw_cid;
+	__le32 reserved1;
 	u8 abortive;
-	u8 reserved0[7];
+	u8 reserved2[7];
 	struct regpair queue_cnts_addr;
 	struct regpair query_params_addr;
 };
 
-/* iSCSI firmware function destroy parameters */
-struct iscsi_spe_func_dstry {
-	struct iscsi_slow_path_hdr hdr;
-	__le16 reserved0;
-	__le32 reserved1;
-};
-
 /* iSCSI firmware function init parameters */
 struct iscsi_spe_func_init {
-	struct iscsi_slow_path_hdr hdr;
 	__le16 half_way_close_timeout;
 	u8 num_sq_pages_in_ring;
 	u8 num_r2tq_pages_in_ring;
@@ -1324,8 +1304,12 @@ struct iscsi_spe_func_init {
 #define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
 #define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
 	struct iscsi_debug_modes debug_mode;
-	__le16 reserved1;
-	__le32 reserved2;
+	u8 params;
+#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define ISCSI_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define ISCSI_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+	u8 reserved2[7];
 	struct scsi_init_func_params func_params;
 	struct scsi_init_func_queues q_params;
 };
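With the slow path header gone from the iSCSI ramrods, function-init tunables live directly in the ramrod body; MAX_SYN_RT is a 4-bit field that presumably bounds SYN retransmissions for offloaded connections. A sketch of programming it, assuming the kernel's SET_FIELD helper and a caller that owns the iscsi_spe_func_init ramrod (the budget value is illustrative):

static void set_syn_retry_budget(struct iscsi_spe_func_init *p_ramrod,
				 u8 budget)
{
	u8 params = 0;

	SET_FIELD(params, ISCSI_SPE_FUNC_INIT_MAX_SYN_RT, budget);
	p_ramrod->params = params;
}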
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -159,6 +159,7 @@ struct qed_dcbx_get {
 enum qed_nvm_images {
 	QED_NVM_IMAGE_ISCSI_CFG,
 	QED_NVM_IMAGE_FCOE_CFG,
+	QED_NVM_IMAGE_MDUMP,
 	QED_NVM_IMAGE_NVM_CFG1,
 	QED_NVM_IMAGE_DEFAULT_CFG,
 	QED_NVM_IMAGE_NVM_META,
@@ -463,7 +464,7 @@ enum qed_db_rec_space {
 
 #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
 
-#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \
+#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
 					      (void __iomem *)(reg_addr))
 
 #define QED_COALESCE_MAX 0x1FF
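The one-character change above is a real fix: writeq() takes a 64-bit value, and the old (u32) cast truncated the upper 32 bits of the doorbell payload before it ever reached the register (0x1122334455667788 would be written as 0x0000000055667788). Casting through (u64) preserves the full value.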
@@ -1177,6 +1178,17 @@ struct qed_common_ops {
 #define GET_FIELD(value, name) \
 	(((value) >> (name ## _SHIFT)) & name ## _MASK)
 
+#define GET_MFW_FIELD(name, field) \
+	(((name) & (field ## _MASK)) >> (field ## _OFFSET))
+
+#define SET_MFW_FIELD(name, field, value) \
+	do { \
+		(name) &= ~(field ## _MASK); \
+		(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
+	} while (0)
+
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
 /* Debug print definitions */
 #define DP_ERR(cdev, fmt, ...) \
 	do { \
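GET_MFW_FIELD and SET_MFW_FIELD parallel GET_FIELD/SET_FIELD but take *_MASK/*_OFFSET pairs whose masks are pre-shifted, matching how management-FW mailbox fields are declared. A round-trip sketch with a made-up field (the DRV_MB_EXAMPLE_FIELD_* names are illustrative, not from the headers):

#define DRV_MB_EXAMPLE_FIELD_MASK	0x0000ff00
#define DRV_MB_EXAMPLE_FIELD_OFFSET	8

static u32 mfw_field_roundtrip(u32 mb_param)
{
	u32 val = GET_MFW_FIELD(mb_param, DRV_MB_EXAMPLE_FIELD);

	SET_MFW_FIELD(mb_param, DRV_MB_EXAMPLE_FIELD, val + 1);

	return mb_param;
}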

--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -52,6 +52,12 @@ enum qed_ll2_conn_type {
 	QED_LL2_TYPE_ROCE,
 	QED_LL2_TYPE_IWARP,
 	QED_LL2_TYPE_RESERVED3,
 	MAX_QED_LL2_CONN_TYPE
 };
 
+enum qed_ll2_rx_conn_type {
+	QED_LL2_RX_TYPE_LEGACY,
+	QED_LL2_RX_TYPE_CTX,
+	MAX_QED_LL2_RX_CONN_TYPE
+};
+
@@ -165,6 +171,7 @@ struct qed_ll2_cbs {
 };
 
 struct qed_ll2_acquire_data_inputs {
+	enum qed_ll2_rx_conn_type rx_conn_type;
 	enum qed_ll2_conn_type conn_type;
 	u16 mtu;
 	u16 rx_num_desc;
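The new rx_conn_type input is how an LL2 client opts into one of the context based RX queues rather than a legacy RAM queue. A minimal sketch of filling the acquire inputs (mtu, descriptor counts and callbacks are mandatory too but elided here):

static void init_ll2_acquire_inputs(struct qed_ll2_acquire_data_inputs *in)
{
	in->rx_conn_type = QED_LL2_RX_TYPE_CTX;	/* context based RX queue */
	in->conn_type = QED_LL2_TYPE_ROCE;
}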
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -107,8 +107,9 @@ struct scsi_drv_cmdq {
 struct scsi_init_func_params {
 	__le16 num_tasks;
 	u8 log_page_size;
+	u8 log_page_size_conn;
 	u8 debug_mode;
-	u8 reserved2[12];
+	u8 reserved2[11];
 };
 
 /* SCSI RQ/CQ/CMDQ firmware function init parameters */