qed*: Fix Kconfig dependencies with INFINIBAND_QEDR
The qedr driver requires a tristate Kconfig option [to allow it to
compile as a module], and toward that end we've added the
INFINIBAND_QEDR option. But since we've made compilation of the
qed/qede infrastructure required for RoCE depend on that option, we'd
face linking failures whenever QED=y or QEDE=y while INFINIBAND_QEDR=m.

To resolve this, we separate the INFINIBAND_QEDR option from the
infrastructure support in qed/qede by introducing a new QED_RDMA
option, which is selected by INFINIBAND_QEDR but is a boolean instead
of a tristate. qed/qede are then fixed based on this new option so
that all config combinations are supported.
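The resulting Kconfig structure, in miniature (illustrative sketch;
prompts and help texts abbreviated):

    config QED_RDMA
    	bool			# never 'm' - infrastructure is built in or out

    config INFINIBAND_QEDR
    	tristate "QLogic qedr driver"
    	depends on QEDE && 64BIT
    	select QED_RDMA		# selecting a bool yields 'y' even when qedr=m

Since a selected bool can only be y or n, INFINIBAND_QEDR=m still turns
the RoCE infrastructure on as built-in code inside qed/qede, so a
built-in qed never references symbols that live in a module.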
Fixes: cee9fbd8e2 ("qede: add qedr framework")
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0189efb8f4
parent ce6b04ee8b
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,10 +107,14 @@ config QEDE
 	---help---
 	  This enables the support for ...
 
+config QED_RDMA
+	bool
+
 config INFINIBAND_QEDR
 	tristate "QLogic qede RoCE sources [debug]"
 	depends on QEDE && 64BIT
 	select QED_LL2
+	select QED_RDMA
 	default n
 	---help---
 	  This provides a temporary node that allows the compilation
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
 	 qed_selftest.o qed_dcbx.o qed_debug.o
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
 qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o
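Because QED_RDMA is a bool, $(CONFIG_QED_RDMA) in the Makefile line
above only ever expands to "y" or to nothing - never to "m". An
illustrative expansion (not part of the patch):

    # CONFIG_QED_RDMA=y    ->  qed-y += qed_roce.o   (linked into qed)
    # CONFIG_QED_RDMA off  ->  qed-  += qed_roce.o   (a list kbuild ignores)

With the old qed-$(CONFIG_INFINIBAND_QEDR) and INFINIBAND_QEDR=m, the
line expanded to qed-m, a list that is never linked into qed itself,
leaving the qed_rdma_* references from the rest of qed unresolved.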
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,13 +47,8 @@
 #define TM_ALIGN        BIT(TM_SHIFT)
 #define TM_ELEM_SIZE    4
 
 /* ILT constants */
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE	4
-#else
-#define ILT_DEFAULT_HW_P_SIZE	3
-#endif
+#define ILT_DEFAULT_HW_P_SIZE	(IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
 
 #define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
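IS_ENABLED() works both in preprocessor conditionals and in ordinary C
expressions, which is what lets the five-line #if/#else block collapse
into a single definition. A minimal sketch (illustrative, not part of
the patch):

    #include <linux/kconfig.h>

    /* 1 when CONFIG_QED_RDMA=y (a bool is never 'm'), 0 otherwise */
    #define ILT_DEFAULT_HW_P_SIZE	(IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)

    static unsigned int ilt_page_bytes(void)
    {
    	/* 4 -> 64K pages to cover the RoCE task range, 3 -> 32K */
    	return 1U << (ILT_DEFAULT_HW_P_SIZE + 12);
    }

Both arms of the ternary are parsed and type-checked in every
configuration; constant folding simply discards the unused one.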
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1422,19 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 	u32 *feat_num = p_hwfn->hw_info.feat_num;
 	int num_features = 1;
 
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-	/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
-	 * status blocks equally between L2 / RoCE but with consideration as
-	 * to how many l2 queues / cnqs we have
-	 */
-	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+	if (IS_ENABLED(CONFIG_QED_RDMA) &&
+	    p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+		/* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
+		 * the status blocks equally between L2 / RoCE but with
+		 * consideration as to how many l2 queues / cnqs we have.
+		 */
 		num_features++;
 
 		feat_num[QED_RDMA_CNQ] =
 			min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
 			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
 	}
-#endif
+
 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
 					num_features,
 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,6 +38,7 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_roce.h"
 
 #define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
 #define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
  */
 void qed_ll2_free(struct qed_hwfn *p_hwfn,
 		  struct qed_ll2_info *p_ll2_connections);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
-				     u8 connection_handle,
-				     void *cookie,
-				     dma_addr_t rx_buf_addr,
-				     u16 data_length,
-				     u8 data_length_error,
-				     u16 parse_flags,
-				     u16 vlan,
-				     u32 src_mac_addr_hi,
-				     u16 src_mac_addr_lo, bool b_last_packet);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
-				     u8 connection_handle,
-				     void *cookie,
-				     dma_addr_t first_frag_addr,
-				     bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
-				    u8 connection_handle,
-				    void *cookie,
-				    dma_addr_t first_frag_addr,
-				    bool b_last_fragment, bool b_last_packet);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -33,10 +33,8 @@
 #include "qed_hw.h"
 #include "qed_selftest.h"
 
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
 #define QED_ROCE_QPS			(8192)
 #define QED_ROCE_DPIS			(8)
-#endif
 
 static char version[] =
 	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 				  enum qed_int_mode int_mode)
 {
 	struct qed_sb_cnt_info sb_cnt_info;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-	int num_l2_queues;
-#endif
+	int num_l2_queues = 0;
 	int rc;
 	int i;
 
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
 				       cdev->num_hwfns;
 
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-	num_l2_queues = 0;
+	if (!IS_ENABLED(CONFIG_QED_RDMA))
+		return 0;
+
 	for_each_hwfn(cdev, i)
 		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
 
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
 		   cdev->int_params.rdma_msix_cnt,
 		   cdev->int_params.rdma_msix_base);
-#endif
 
 	return 0;
 }
@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
 {
 	int i;
 
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-	params->rdma_pf_params.num_qps = QED_ROCE_QPS;
-	params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
-	/* divide by 3 the MRs to avoid MF ILT overflow */
-	params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
-	params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
-#endif
 	for (i = 0; i < cdev->num_hwfns; i++) {
 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
 		p_hwfn->pf_params = *params;
 	}
+
+	if (!IS_ENABLED(CONFIG_QED_RDMA))
+		return;
+
+	params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+	params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+	/* divide by 3 the MRs to avoid MF ILT overflow */
+	params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+	params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
 }
 
 static int qed_slowpath_start(struct qed_dev *cdev,
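Note the pattern replacing the #ifdef blocks in qed_slowpath_setup_int()
and qed_update_pf_params() above: the RDMA-only tail is guarded by a
constant-folded early return rather than by the preprocessor. A minimal
sketch (illustrative, not part of the patch):

    static void qed_update_pf_params(struct qed_dev *cdev,
    				     struct qed_pf_params *params)
    {
    	/* ... setup needed in every configuration ... */

    	if (!IS_ENABLED(CONFIG_QED_RDMA))
    		return;	/* tail compiles, then folds away as dead code */

    	params->rdma_pf_params.num_qps = QED_ROCE_QPS;
    	/* ... remaining RDMA-only defaults ... */
    }

This only builds because everything the dead tail references stays
defined in all configurations - which is why the #if around the
QED_ROCE_QPS/QED_ROCE_DPIS defines is dropped in the first hunk of
this file.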
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 	}
 }
 
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 {
 	/* First sb id for RoCE is after all the l2 sb */
 	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
 }
 
-u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
-{
-	return QED_CAU_DEF_RX_TIMER_RES;
-}
-
 static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt,
 			  struct qed_rdma_start_in_params *params)
@@ -275,7 +270,7 @@ free_rdma_info:
 	return rc;
 }
 
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 {
 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
 
@@ -527,6 +522,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+	int rc;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	rc = qed_rdma_bmap_alloc_id(p_hwfn,
+				    &p_hwfn->p_rdma_info->tid_map, itid);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+	if (rc)
+		goto out;
+
+	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+	return rc;
+}
+
 static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
 {
 	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +588,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
 }
 
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +644,8 @@ out:
 	return rc;
 }
 
-int qed_rdma_add_user(void *rdma_cxt,
-		      struct qed_rdma_add_user_out_params *out_params)
+static int qed_rdma_add_user(void *rdma_cxt,
+			     struct qed_rdma_add_user_out_params *out_params)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	u32 dpi_start_offset;
@@ -664,7 +679,7 @@ int qed_rdma_add_user(void *rdma_cxt,
 	return rc;
 }
 
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +695,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
 	return p_port;
 }
 
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 
@@ -690,7 +705,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
 	return p_hwfn->p_rdma_info->dev;
 }
 
-void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 
@@ -701,27 +716,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
-	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
-	int rc;
-
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
-	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
-	rc = qed_rdma_bmap_alloc_id(p_hwfn,
-				    &p_hwfn->p_rdma_info->tid_map, itid);
-	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
-	if (rc)
-		goto out;
-
-	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
-	return rc;
-}
-
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 {
 	struct qed_hwfn *p_hwfn;
 	u16 qz_num;
@@ -816,7 +811,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
 	return 0;
 }
 
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	u32 returned_id;
@@ -1985,9 +1980,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	return 0;
 }
 
-int qed_rdma_query_qp(void *rdma_cxt,
-		      struct qed_rdma_qp *qp,
-		      struct qed_rdma_query_qp_out_params *out_params)
+static int qed_rdma_query_qp(void *rdma_cxt,
+			     struct qed_rdma_qp *qp,
+			     struct qed_rdma_query_qp_out_params *out_params)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	int rc;
@@ -2022,7 +2017,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
 	return rc;
 }
 
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	int rc = 0;
@@ -2215,9 +2210,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
-int qed_rdma_modify_qp(void *rdma_cxt,
-		       struct qed_rdma_qp *qp,
-		       struct qed_rdma_modify_qp_in_params *params)
+static int qed_rdma_modify_qp(void *rdma_cxt,
+			      struct qed_rdma_qp *qp,
+			      struct qed_rdma_modify_qp_in_params *params)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	enum qed_roce_qp_state prev_state;
@@ -2312,8 +2307,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
 	return rc;
 }
 
-int qed_rdma_register_tid(void *rdma_cxt,
-			  struct qed_rdma_register_tid_in_params *params)
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+		      struct qed_rdma_register_tid_in_params *params)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2446,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
 	return rc;
 }
 
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2557,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	qed_rdma_dpm_conf(p_hwfn, p_ptt);
 }
 
-int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+static int qed_rdma_start(void *rdma_cxt,
+			  struct qed_rdma_start_in_params *params)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 	struct qed_ptt *p_ptt;
@@ -2601,7 +2598,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
 	return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
 }
 
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
 {
 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -181,36 +181,55 @@ struct qed_rdma_qp {
 	dma_addr_t shared_queue_phys_addr;
 };
 
-int
-qed_rdma_add_user(void *rdma_cxt,
-		  struct qed_rdma_add_user_out_params *out_params);
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
-void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
-int
-qed_rdma_register_tid(void *rdma_cxt,
-		      struct qed_rdma_register_tid_in_params *params);
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
-int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
-int qed_rdma_stop(void *rdma_cxt);
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
-u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 void qed_async_roce_event(struct qed_hwfn *p_hwfn,
 			  struct event_ring_entry *p_eqe);
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
-int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
-		       struct qed_rdma_modify_qp_in_params *params);
-int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
-		      struct qed_rdma_query_qp_out_params *out_params);
-
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+				     u8 connection_handle,
+				     void *cookie,
+				     dma_addr_t first_frag_addr,
+				     bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+				    u8 connection_handle,
+				    void *cookie,
+				    dma_addr_t first_frag_addr,
+				    bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+				     u8 connection_handle,
+				     void *cookie,
+				     dma_addr_t rx_buf_addr,
+				     u16 data_length,
+				     u8 data_length_error,
+				     u16 parse_flags,
+				     u16 vlan,
+				     u32 src_mac_addr_hi,
+				     u16 src_mac_addr_lo, bool b_last_packet);
 #else
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+						   u8 connection_handle,
+						   void *cookie,
+						   dma_addr_t first_frag_addr,
+						   bool b_last_fragment,
+						   bool b_last_packet) {}
+static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+						  u8 connection_handle,
+						  void *cookie,
+						  dma_addr_t first_frag_addr,
+						  bool b_last_fragment,
+						  bool b_last_packet) {}
+static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+						   u8 connection_handle,
+						   void *cookie,
+						   dma_addr_t rx_buf_addr,
+						   u16 data_length,
+						   u8 data_length_error,
+						   u16 parse_flags,
+						   u16 vlan,
+						   u32 src_mac_addr_hi,
+						   u16 src_mac_addr_lo,
+						   bool b_last_packet) {}
 #endif
 #endif
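The qed_roce.h hunk above also shows the companion to the IS_ENABLED()
early returns: callers such as qed_spq.c keep compiling against the
same names when CONFIG_QED_RDMA is off, but resolve them to empty
static inline stubs that the compiler discards. The shape of the
pattern (condensed from the hunk above):

    #if IS_ENABLED(CONFIG_QED_RDMA)
    void qed_async_roce_event(struct qed_hwfn *p_hwfn,
    			  struct event_ring_entry *p_eqe);
    #else
    static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn,
    					struct event_ring_entry *p_eqe) {}
    #endif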
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,9 +28,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
 #include "qed_roce.h"
-#endif
 
 /***************************************************************************
 * Structures & Definitions
@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
 				struct event_ring_entry *p_eqe)
 {
 	switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
 	case PROTOCOLID_ROCE:
 		qed_async_roce_event(p_hwfn, p_eqe);
 		return 0;
-#endif
 	case PROTOCOLID_COMMON:
 		return qed_sriov_eqe_event(p_hwfn,
 					   p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
 
 qede-y := qede_main.o qede_ethtool.o
 qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
 
 bool qede_roce_supported(struct qede_dev *dev);
 
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
 int qede_roce_dev_add(struct qede_dev *dev);
 void qede_roce_dev_event_open(struct qede_dev *dev);
 void qede_roce_dev_event_close(struct qede_dev *dev);