Merge branch 'from-rc' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
Patches for 4.16 that are dependent on patches sent to 4.15-rc. These are
small clean ups for the vmw_pvrdma and i40iw drivers.

* 'from-rc' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git:
  RDMA/vmw_pvrdma: Remove usage of BIT() from UAPI header
  RDMA/vmw_pvrdma: Use refcount_t instead of atomic_t
  RDMA/vmw_pvrdma: Use more specific sizeof in kcalloc
  RDMA/vmw_pvrdma: Clarify QP and CQ is_kernel logic
  RDMA/vmw_pvrdma: Add UAR SRQ macros in ABI header file
  i40iw: Change accelerated flag to bool
commit 76a895d9e1
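The shortlog above includes "RDMA/vmw_pvrdma: Use refcount_t instead of atomic_t". As a rough, illustrative sketch (the struct and function names below are hypothetical, not the driver's actual symbols), the conversion pairs refcount_t with a completion so that teardown blocks until the last reference is dropped:

/* Hypothetical object using the refcount_t + completion teardown pattern. */
#include <linux/refcount.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct demo_obj {
	refcount_t refcnt;
	struct completion free;
};

static void demo_obj_init(struct demo_obj *obj)
{
	refcount_set(&obj->refcnt, 1);		/* creator holds the initial reference */
	init_completion(&obj->free);
}

static void demo_obj_put(struct demo_obj *obj)
{
	/* The last reference signals the waiter instead of a wake_up()/wait_event() pair. */
	if (refcount_dec_and_test(&obj->refcnt))
		complete(&obj->free);
}

static void demo_obj_destroy(struct demo_obj *obj)
{
	demo_obj_put(obj);			/* drop the creator's reference */
	wait_for_completion(&obj->free);	/* block until every user has dropped theirs */
	kfree(obj);
}

This mirrors the general shape of the pvrdma hunks further down, where wait_event() on a wait queue is replaced by wait_for_completion().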
@@ -1,6 +1,6 @@
menuconfig INFINIBAND
tristate "InfiniBand support"
depends on HAS_IOMEM
depends on HAS_IOMEM && HAS_DMA
depends on NET
depends on INET
depends on m || IPV6 != m
@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
id_priv->id.route.addr.dev_addr.net = get_net(net);
id_priv->seq_num &= 0x00ffffff;

return &id_priv->id;
}

@@ -4453,7 +4454,7 @@ out:
return skb->len;
}

static const struct rdma_nl_cbs cma_cb_table[] = {
static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
};
@@ -1136,7 +1136,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
[RDMA_NL_LS_OP_RESOLVE] = {
.doit = ib_nl_handle_resolve_resp,
.flags = RDMA_NL_ADMIN_PERM,

@@ -1243,5 +1243,5 @@ static void __exit ib_core_cleanup(void)

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

module_init(ib_core_init);
subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
}
EXPORT_SYMBOL(iwcm_reject_msg);

static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
@@ -303,7 +303,7 @@ out: cb->args[0] = idx;
return skb->len;
}

static const struct rdma_nl_cbs nldev_cb_table[] = {
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
.dump = nldev_get_dumpit,
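The three hunks above give the core netlink callback tables an explicit size (RDMA_NL_RDMA_CM_NUM_OPS, RDMA_NL_LS_NUM_OPS, RDMA_NLDEV_NUM_OPS). A minimal, generic sketch of why a fixed-size, opcode-indexed table helps — the dispatcher can range-check before indexing — using hypothetical names rather than the rdma_nl machinery:

/* Illustrative only: fixed-size callback table indexed by opcode. */
#define DEMO_NUM_OPS 8

struct demo_cb {
	int (*doit)(void *req);
};

static int demo_get(void *req)
{
	return 0;
}

static const struct demo_cb demo_cb_table[DEMO_NUM_OPS] = {
	[2] = { .doit = demo_get },	/* sparse initializer; other slots stay zeroed */
};

static int demo_dispatch(unsigned int op, void *req)
{
	if (op >= DEMO_NUM_OPS || !demo_cb_table[op].doit)
		return -1;		/* unknown or unimplemented op */
	return demo_cb_table[op].doit(req);
}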
@@ -386,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
if (ret)
return ret;

if (!qp->qp_sec)
return 0;

mutex_lock(&real_qp->qp_sec->mutex);
ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
qp->qp_sec);

@@ -417,8 +420,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
u8 i = rdma_start_port(dev);
bool is_ib = false;
int ret;

while (i <= rdma_end_port(dev) && !is_ib)
is_ib = rdma_protocol_ib(dev, i++);

/* If this isn't an IB device don't create the security context */
if (!is_ib)
return 0;

qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
if (!qp->qp_sec)
return -ENOMEM;

@@ -441,6 +453,10 @@ EXPORT_SYMBOL(ib_create_qp_security);

void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
/* Return if not IB */
if (!sec)
return;

mutex_lock(&sec->mutex);

/* Remove the QP from the lists so it won't get added to

@@ -470,6 +486,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
int ret;
int i;

/* Return if not IB */
if (!sec)
return;

/* If a concurrent cache update is in progress this
* QP security could be marked for an error state
* transition. Wait for this to complete.

@@ -505,6 +525,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
int i;

/* Return if not IB */
if (!sec)
return;

/* If a concurrent cache update is occurring we must
* wait until this QP security structure is processed
* in the QP to error flow before destroying it because

@@ -557,7 +581,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
{
int ret = 0;
struct ib_ports_pkeys *tmp_pps;
struct ib_ports_pkeys *new_pps;
struct ib_ports_pkeys *new_pps = NULL;
struct ib_qp *real_qp = qp->real_qp;
bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
real_qp->qp_type == IB_QPT_GSI ||

@@ -565,18 +589,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
(qp_attr_mask & IB_QP_ALT_PATH));

WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
!real_qp->qp_sec),
"%s: QP security is not initialized for IB QP: %d\n",
__func__, real_qp->qp_num);

/* The port/pkey settings are maintained only for the real QP. Open
* handles on the real QP will be in the shared_qp_list. When
* enforcing security on the real QP all the shared QPs will be
* checked as well.
*/

if (pps_change && !special_qp) {
if (pps_change && !special_qp && real_qp->qp_sec) {
mutex_lock(&real_qp->qp_sec->mutex);
new_pps = get_new_pps(real_qp,
qp_attr,
qp_attr_mask);

if (!new_pps) {
mutex_unlock(&real_qp->qp_sec->mutex);
return -ENOMEM;
}
/* Add this QP to the lists for the new port
* and pkey settings before checking for permission
* in case there is a concurrent cache update

@@ -600,7 +633,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
qp_attr_mask,
udata);

if (pps_change && !special_qp) {
if (new_pps) {
/* Clean up the lists and free the appropriate
* ports_pkeys structure.
*/

@@ -630,6 +663,9 @@ static int ib_security_pkey_access(struct ib_device *dev,
u16 pkey;
int ret;

if (!rdma_protocol_ib(dev, port_num))
return 0;

ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
if (ret)
return ret;

@@ -663,6 +699,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
{
int ret;

if (!rdma_protocol_ib(agent->device, agent->port_num))
return 0;

ret = security_ib_alloc_security(&agent->security);
if (ret)
return ret;

@@ -688,6 +727,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
if (!rdma_protocol_ib(agent->device, agent->port_num))
return;

security_ib_free_security(agent->security);
if (agent->lsm_nb_reg)
unregister_lsm_notifier(&agent->lsm_nb);

@@ -695,8 +737,14 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)

int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
return -EACCES;
if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
return 0;

if (map->agent.qp->qp_type == IB_QPT_SMI) {
if (!map->agent.smp_allowed)
return -EACCES;
return 0;
}

return ib_security_pkey_access(map->agent.device,
map->agent.port_num,
@@ -1968,6 +1968,12 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp;
}

if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
!rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
ret = -EINVAL;
goto release_qp;
}

attr->qp_state = cmd->base.qp_state;
attr->cur_qp_state = cmd->base.cur_qp_state;
attr->path_mtu = cmd->base.path_mtu;

@@ -2065,8 +2071,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
return -EOPNOTSUPP;

if (ucore->inlen > sizeof(cmd)) {
if (ib_is_udata_cleared(ucore, sizeof(cmd),
ucore->inlen - sizeof(cmd)))
if (!ib_is_udata_cleared(ucore, sizeof(cmd),
ucore->inlen - sizeof(cmd)))
return -EOPNOTSUPP;
}
@@ -1439,7 +1439,8 @@ int ib_close_qp(struct ib_qp *qp)
spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

atomic_dec(&real_qp->usecnt);
ib_close_shared_qp_security(qp->qp_sec);
if (qp->qp_sec)
ib_close_shared_qp_security(qp->qp_sec);
kfree(qp);

return 0;
@@ -395,6 +395,11 @@ next_cqe:

static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
if (DRAIN_CQE(cqe)) {
WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
return 0;
}

if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
return 0;

@@ -489,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
/*
* Special cqe for drain WR completions...
*/
if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
if (DRAIN_CQE(hw_cqe)) {
*cookie = CQE_DRAIN_COOKIE(hw_cqe);
*cqe = *hw_cqe;
goto skip_cqe;

@@ -566,10 +571,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
ret = -EAGAIN;
goto skip_cqe;
}
if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
if (unlikely(!CQE_STATUS(hw_cqe) &&
CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
t4_set_wq_in_error(wq);
hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
goto proc_cqe;
hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
}
goto proc_cqe;
}

@@ -743,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
c4iw_invalidate_mr(qhp->rhp,
CQE_WRID_FR_STAG(&cqe));
break;
case C4IW_DRAIN_OPCODE:
wc->opcode = IB_WC_SEND;
break;
default:
pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
CQE_OPCODE(&cqe), CQE_QPID(&cqe));

@@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
return IB_QPS_ERR;
}

#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN

static inline u32 c4iw_ib_to_tpt_access(int a)
{
return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
return 0;
}

static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
static int ib_to_fw_opcode(int ib_opcode)
{
int opcode;

switch (ib_opcode) {
case IB_WR_SEND_WITH_INV:
opcode = FW_RI_SEND_WITH_INV;
break;
case IB_WR_SEND:
opcode = FW_RI_SEND;
break;
case IB_WR_RDMA_WRITE:
opcode = FW_RI_RDMA_WRITE;
break;
case IB_WR_RDMA_READ:
case IB_WR_RDMA_READ_WITH_INV:
opcode = FW_RI_READ_REQ;
break;
case IB_WR_REG_MR:
opcode = FW_RI_FAST_REGISTER;
break;
case IB_WR_LOCAL_INV:
opcode = FW_RI_LOCAL_INV;
break;
default:
opcode = -EINVAL;
}
return opcode;
}

static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
{
struct t4_cqe cqe = {};
struct c4iw_cq *schp;
unsigned long flag;
struct t4_cq *cq;
int opcode;

schp = to_c4iw_cq(qhp->ibqp.send_cq);
cq = &schp->cq;

opcode = ib_to_fw_opcode(wr->opcode);
if (opcode < 0)
return opcode;

cqe.u.drain_cookie = wr->wr_id;
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
CQE_OPCODE_V(opcode) |
CQE_TYPE_V(1) |
CQE_SWCQE_V(1) |
CQE_DRAIN_V(1) |
CQE_QPID_V(qhp->wq.sq.qid));

spin_lock_irqsave(&schp->lock, flag);

@@ -819,6 +855,23 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
schp->ibcq.cq_context);
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}
return 0;
}

static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
int ret = 0;

while (wr) {
ret = complete_sq_drain_wr(qhp, wr);
if (ret) {
*bad_wr = wr;
break;
}
wr = wr->next;
}
return ret;
}

static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)

@@ -833,9 +886,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)

cqe.u.drain_cookie = wr->wr_id;
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
CQE_OPCODE_V(FW_RI_SEND) |
CQE_TYPE_V(0) |
CQE_SWCQE_V(1) |
CQE_DRAIN_V(1) |
CQE_QPID_V(qhp->wq.sq.qid));

spin_lock_irqsave(&rchp->lock, flag);

@@ -852,6 +906,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
}
}

static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
{
while (wr) {
complete_rq_drain_wr(qhp, wr);
wr = wr->next;
}
}

int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{

@@ -868,9 +930,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,

qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {

/*
* If the qp has been flushed, then just insert a special
* drain cqe.
*/
if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
complete_sq_drain_wr(qhp, wr);
err = complete_sq_drain_wrs(qhp, wr, bad_wr);
return err;
}
num_wrs = t4_sq_avail(&qhp->wq);

@@ -1011,9 +1078,14 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,

qhp = to_c4iw_qp(ibqp);
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {

/*
* If the qp has been flushed, then just insert a special
* drain cqe.
*/
if (qhp->wq.flushed) {
spin_unlock_irqrestore(&qhp->lock, flag);
complete_rq_drain_wr(qhp, wr);
complete_rq_drain_wrs(qhp, wr);
return err;
}
num_wrs = t4_rq_avail(&qhp->wq);

@@ -1285,21 +1357,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
spin_unlock_irqrestore(&rchp->lock, flag);

if (schp == rchp) {
if (t4_clear_cq_armed(&rchp->cq) &&
(rq_flushed || sq_flushed)) {
if ((rq_flushed || sq_flushed) &&
t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
} else {
if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
rchp->ibcq.cq_context);
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
}
if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
@@ -197,6 +197,11 @@ struct t4_cqe {
#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)

#define CQE_DRAIN_S 10
#define CQE_DRAIN_M 0x1
#define CQE_DRAIN_G(x) ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
#define CQE_DRAIN_V(x) ((x)<<CQE_DRAIN_S)

#define CQE_STATUS_S 5
#define CQE_STATUS_M 0x1F
#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)

@@ -213,6 +218,7 @@ struct t4_cqe {
#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)

#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
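The CQE_DRAIN_* additions above follow the usual _S/_M/_G/_V shift-mask convention. A small, self-contained illustration (standalone macros, not the driver's header) of how the V macro packs the drain bit into a CQE header word and the G macro extracts it:

#include <stdint.h>
#include <assert.h>

#define DEMO_DRAIN_S 10
#define DEMO_DRAIN_M 0x1
#define DEMO_DRAIN_G(x) (((x) >> DEMO_DRAIN_S) & DEMO_DRAIN_M)
#define DEMO_DRAIN_V(x) ((x) << DEMO_DRAIN_S)

int main(void)
{
	uint32_t header = 0;

	header |= DEMO_DRAIN_V(1);		/* writer: mark the CQE as a drain completion */
	assert(DEMO_DRAIN_G(header) == 1);	/* reader: test the bit, as DRAIN_CQE() does */
	return 0;
}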
@@ -1131,7 +1131,6 @@ struct hfi1_devdata {
u16 pcie_lnkctl;
u16 pcie_devctl2;
u32 pci_msix0;
u32 pci_lnkctl3;
u32 pci_tph2;

/*
@@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd)
if (ret)
goto error;

ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
dd->pci_lnkctl3);
if (ret)
goto error;

ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
if (ret)
goto error;

if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
dd->pci_tph2);
if (ret)
goto error;
}
return 0;

error:

@@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd)
if (ret)
goto error;

ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
&dd->pci_lnkctl3);
if (ret)
goto error;

ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
if (ret)
goto error;

if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
&dd->pci_tph2);
if (ret)
goto error;
}
return 0;

error:
@@ -814,7 +814,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_16b_header *hdr = &opa_hdr->opah;
struct ib_other_headers *ohdr;
u32 bth0, bth1;
u32 bth0, bth1 = 0;
u16 len, pkey;
u8 becn = !!is_fecn;
u8 l4 = OPA_16B_L4_IB_LOCAL;
@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
{
int i;
struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG;

if (buf->nbufs == 1) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
vunmap(buf->direct.buf);

for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(dev, 1 << buf->page_shift,

@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
{
int i = 0;
dma_addr_t t;
struct page **pages;
struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG;
u32 page_size = 1 << page_shift;
u32 order;

@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
buf->page_list[i].map = t;
memset(buf->page_list[i].buf, 0, page_size);
}
if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
pages = kmalloc_array(buf->nbufs, sizeof(*pages),
GFP_KERNEL);
if (!pages)
goto err_free;

for (i = 0; i < buf->nbufs; ++i)
pages[i] = virt_to_page(buf->page_list[i].buf);

buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
PAGE_KERNEL);
kfree(pages);
if (!buf->direct.buf)
goto err_free;
} else {
buf->direct.buf = NULL;
}
}

return 0;
@@ -795,11 +795,9 @@ static inline struct hns_roce_qp

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
u32 bits_per_long_val = BITS_PER_LONG;
u32 page_size = 1 << buf->page_shift;

if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
buf->nbufs == 1)
if (buf->nbufs == 1)
return (char *)(buf->direct.buf) + offset;
else
return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
memset(chunk->buf, 0, sizeof(chunk->buf));
list_add_tail(&chunk->list, &hem->chunk_list);
}

@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
if (!buf)
goto fail;

sg_set_buf(mem, buf, PAGE_SIZE << order);
WARN_ON(mem->offset);
chunk->buf[chunk->npages] = buf;
sg_dma_len(mem) = PAGE_SIZE << order;

++chunk->npages;

@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(hr_dev->dev,
chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_len(&chunk->mem[i]),
chunk->buf[i],
sg_dma_address(&chunk->mem[i]));
kfree(chunk);
}

@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_chunk *chunk;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
struct page *page = NULL;
void *addr = NULL;
unsigned long mhop_obj = obj;
unsigned long obj_per_chunk;
unsigned long idx_offset;
int offset, dma_offset;
int length;
int i, j;
u32 hem_idx = 0;

@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,

list_for_each_entry(chunk, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
length = sg_dma_len(&chunk->mem[i]);
if (dma_handle && dma_offset >= 0) {
if (sg_dma_len(&chunk->mem[i]) >
(u32)dma_offset)
if (length > (u32)dma_offset)
*dma_handle = sg_dma_address(
&chunk->mem[i]) + dma_offset;
dma_offset -= sg_dma_len(&chunk->mem[i]);
dma_offset -= length;
}

if (chunk->mem[i].length > (u32)offset) {
page = sg_page(&chunk->mem[i]);
if (length > (u32)offset) {
addr = chunk->buf[i] + offset;
goto out;
}
offset -= chunk->mem[i].length;
offset -= length;
}
}

out:
mutex_unlock(&table->mutex);
return page ? lowmem_page_address(page) + offset : NULL;
return addr;
}
EXPORT_SYMBOL_GPL(hns_roce_table_find);

@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
int npages;
int nsg;
struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
void *buf[HNS_ROCE_HEM_CHUNK_LEN];
};

struct hns_roce_hem {
@@ -1131,9 +1131,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
{
struct hns_roce_v2_mpt_entry *mpt_entry;
struct scatterlist *sg;
u64 page_addr;
u64 *pages;
int i, j;
int len;
int entry;
int i;

mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));

@@ -1191,14 +1193,20 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,

i = 0;
for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
pages[i] = ((u64)sg_dma_address(sg)) >> 6;
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (j = 0; j < len; ++j) {
page_addr = sg_dma_address(sg) +
(j << mr->umem->page_shift);
pages[i] = page_addr >> 6;

/* Record the first 2 entry directly to MTPT table */
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
break;
i++;
/* Record the first 2 entry directly to MTPT table */
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
goto found;
i++;
}
}

found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S,
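The hw_v2.c hunk above stops recording only the first page of each DMA segment and instead walks every page the segment covers. A rough sketch of that per-segment page walk with hypothetical names (plain C, not the umem/scatterlist API):

#include <stdint.h>
#include <stddef.h>

/* Record every page covered by one DMA segment into pages[], starting at slot i. */
static size_t demo_record_segment(uint64_t seg_addr, size_t seg_len,
				  unsigned int page_shift,
				  uint64_t *pages, size_t max_pages, size_t i)
{
	size_t npages = seg_len >> page_shift;
	size_t j;

	for (j = 0; j < npages && i < max_pages; j++)
		pages[i++] = seg_addr + ((uint64_t)j << page_shift);

	return i;	/* next free slot, so the caller can continue with the next segment */
}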
@@ -1041,7 +1041,7 @@ negotiate_done:
* i40iw_schedule_cm_timer
* @@cm_node: connection's node
* @sqbuf: buffer to send
* @type: if it es send ot close
* @type: if it is send or close
* @send_retrans: if rexmits to be done
* @close_when_complete: is cm_node to be removed
*

@@ -1065,7 +1065,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,

new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send) {
i40iw_free_sqbuf(vsi, (void *)sqbuf);
if (type != I40IW_TIMER_TYPE_CLOSE)
i40iw_free_sqbuf(vsi, (void *)sqbuf);
return -ENOMEM;
}
new_send->retrycount = I40IW_DEFAULT_RETRYS;

@@ -1080,7 +1081,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
new_send->timetosend += (HZ / 10);
if (cm_node->close_entry) {
kfree(new_send);
i40iw_free_sqbuf(vsi, (void *)sqbuf);
i40iw_pr_err("already close entry\n");
return -EINVAL;
}

@@ -2952,8 +2952,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
}
return cm_node;
}

@@ -3694,11 +3692,16 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_id->add_ref(cm_id);
i40iw_add_ref(&iwqp->ibqp);

i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);

attr.qp_state = IB_QPS_RTS;
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);

cm_node->accelerated = true;
status =
i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
if (status)
i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");

if (cm_node->loopbackpartner) {
cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;

@@ -3709,7 +3712,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
}

cm_node->accelerated = 1;
if (cm_node->accept_pend) {
atomic_dec(&cm_node->listener->pend_accepts_cnt);
cm_node->accept_pend = 0;

@@ -3862,6 +3864,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto err;
}

if (cm_node->loopbackpartner) {
cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
i40iw_create_event(cm_node->loopbackpartner,
I40IW_CM_EVENT_MPA_REQ);
}

i40iw_debug(cm_node->dev,
I40IW_DEBUG_CM,
"Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",

@@ -4042,16 +4050,17 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
if (iwqp->page)
kunmap(iwqp->page);
status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
if (status)
i40iw_pr_err("send cm event\n");

memset(&attr, 0, sizeof(attr));
attr.qp_state = IB_QPS_RTS;
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);

cm_node->accelerated = 1;
cm_node->accelerated = true;
status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
0);
if (status)
i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");

return;

@@ -335,7 +335,7 @@ struct i40iw_cm_node {
u16 mpav2_ird_ord;
struct iw_cm_id *cm_id;
struct list_head list;
int accelerated;
bool accelerated;
struct i40iw_cm_listener *listener;
int apbvt_set;
int accept_pend;
@@ -513,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,

ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
&cqp->sdbuf,
128,
I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
I40IW_SD_BUF_ALIGNMENT);

if (ret_code)

@@ -596,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
}

/**
* i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
* @cqp: struct for cqp hw
* @wqe_idx: we index of cqp ring
* i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
* @cqp: pointer to CQP structure
* @scratch: private data for CQP WQE
* @wqe_idx: WQE index for next WQE on CQP SQ
*/
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
u64 scratch, u32 *wqe_idx)
{
u64 *wqe = NULL;
u32 wqe_idx;
enum i40iw_status_code ret_code;

if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {

@@ -616,20 +617,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
cqp->sq_ring.size);
return NULL;
}
I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
if (ret_code)
return NULL;
if (!wqe_idx)
if (!*wqe_idx)
cqp->polarity = !cqp->polarity;

wqe = cqp->sq_base[wqe_idx].elem;
cqp->scratch_array[wqe_idx] = scratch;
wqe = cqp->sq_base[*wqe_idx].elem;
cqp->scratch_array[*wqe_idx] = scratch;
I40IW_CQP_INIT_WQE(wqe);

return wqe;
}

/**
* i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
* @cqp: struct for cqp hw
* @scratch: private data for CQP WQE
*/
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
u32 wqe_idx;

return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
}

/**
* i40iw_sc_cqp_destroy - destroy cqp during close
* @cqp: struct for cqp hw

@@ -3587,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
u64 *wqe;
int mem_entries, wqe_entries;
struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
u64 offset;
u32 wqe_idx;

wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
if (!wqe)
return I40IW_ERR_RING_FULL;

@@ -3601,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);

if (mem_entries) {
memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
data = sdbuf->pa;
offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
memcpy((char *)sdbuf->va + offset, &info->entry[3],
mem_entries << 4);
data = (u64)sdbuf->pa + offset;
} else {
data = 0;
}
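The ctrl.c hunks above split i40iw_sc_cqp_get_next_send_wqe() so that an *_idx variant also reports which SQ slot was handed out, which cqp_sds_wqe_fill() then uses to compute a per-WQE buffer offset. A toy sketch of that wrapper pattern with made-up names:

#include <stdint.h>

#define DEMO_RING_SIZE 64

static uint64_t demo_ring[DEMO_RING_SIZE];

static uint64_t *demo_get_next_wqe_idx(uint32_t head, uint32_t *wqe_idx)
{
	*wqe_idx = head % DEMO_RING_SIZE;	/* report which slot was handed out */
	return &demo_ring[*wqe_idx];
}

static uint64_t *demo_get_next_wqe(uint32_t head)
{
	uint32_t unused_idx;

	return demo_get_next_wqe_idx(head, &unused_idx);	/* old signature preserved */
}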
@@ -1115,7 +1115,7 @@
#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)

#define I40IWQPC_ARPIDX_SHIFT 48
#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)

#define I40IWQPC_FLOWLABEL_SHIFT 0
#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)

@@ -1527,7 +1527,7 @@ enum i40iw_alignment {
I40IW_AEQ_ALIGNMENT = 0x100,
I40IW_CEQ_ALIGNMENT = 0x100,
I40IW_CQ0_ALIGNMENT = 0x100,
I40IW_SD_BUF_ALIGNMENT = 0x100
I40IW_SD_BUF_ALIGNMENT = 0x80
};

#define I40IW_WQE_SIZE_64 64

@@ -1535,6 +1535,8 @@ enum i40iw_alignment {
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128

#define I40IW_UPDATE_SD_BUF_SIZE 128

#define I40IW_CQE_QTYPE_RQ 0
#define I40IW_CQE_QTYPE_SQ 1
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}

if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
MLX4_IB_RX_HASH_DST_IPV4 |
MLX4_IB_RX_HASH_SRC_IPV6 |
MLX4_IB_RX_HASH_DST_IPV6 |
MLX4_IB_RX_HASH_SRC_PORT_TCP |
MLX4_IB_RX_HASH_DST_PORT_TCP |
MLX4_IB_RX_HASH_SRC_PORT_UDP |
MLX4_IB_RX_HASH_DST_PORT_UDP)) {
pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
ucmd->rx_hash_fields_mask);
return (-EOPNOTSUPP);
}

if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
rss_ctx->flags = MLX4_RSS_IPV4;

@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}

if (rss_ctx->flags & MLX4_RSS_IPV4) {
if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
} else if (rss_ctx->flags & MLX4_RSS_IPV6) {
if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
} else {
if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}

@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,

if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
if (rss_ctx->flags & MLX4_RSS_IPV4) {
if (rss_ctx->flags & MLX4_RSS_IPV4)
rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
} else if (rss_ctx->flags & MLX4_RSS_IPV6) {
if (rss_ctx->flags & MLX4_RSS_IPV6)
rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
} else {
if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
return (-EOPNOTSUPP);
}

} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
(ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
@@ -47,17 +47,6 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
return err;
}

int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
bool reset, void *out, int out_size)
{
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };

MLX5_SET(query_cong_statistics_in, in, opcode,
MLX5_CMD_OP_QUERY_CONG_STATISTICS);
MLX5_SET(query_cong_statistics_in, in, clear, reset);
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}

int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size)
{

@@ -37,8 +37,6 @@
#include <linux/mlx5/driver.h>

int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
bool reset, void *out, int out_size);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out, int out_size);
int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
@@ -1461,6 +1461,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
}

INIT_LIST_HEAD(&context->vma_private_list);
mutex_init(&context->vma_private_list_mutex);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);

@@ -1622,7 +1623,9 @@ static void mlx5_ib_vma_close(struct vm_area_struct *area)
* mlx5_ib_disassociate_ucontext().
*/
mlx5_ib_vma_priv_data->vma = NULL;
mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
list_del(&mlx5_ib_vma_priv_data->list);
mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
kfree(mlx5_ib_vma_priv_data);
}

@@ -1642,10 +1645,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
return -ENOMEM;

vma_prv->vma = vma;
vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
vma->vm_private_data = vma_prv;
vma->vm_ops = &mlx5_ib_vm_ops;

mutex_lock(&ctx->vma_private_list_mutex);
list_add(&vma_prv->list, vma_head);
mutex_unlock(&ctx->vma_private_list_mutex);

return 0;
}

@@ -1688,6 +1694,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* mlx5_ib_vma_close.
*/
down_write(&owning_mm->mmap_sem);
mutex_lock(&context->vma_private_list_mutex);
list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
list) {
vma = vma_private->vma;

@@ -1702,6 +1709,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
list_del(&vma_private->list);
kfree(vma_private);
}
mutex_unlock(&context->vma_private_list_mutex);
up_write(&owning_mm->mmap_sem);
mmput(owning_mm);
put_task_struct(owning_process);

@@ -3735,34 +3743,6 @@ free:
return ret;
}

static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
struct mlx5_ib_port *port,
struct rdma_hw_stats *stats)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
void *out;
int ret, i;
int offset = port->cnts.num_q_counters;

out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;

ret = mlx5_cmd_query_cong_counter(dev->mdev, false, out, outlen);
if (ret)
goto free;

for (i = 0; i < port->cnts.num_cong_counters; i++) {
stats->value[i + offset] =
be64_to_cpup((__be64 *)(out +
port->cnts.offsets[i + offset]));
}

free:
kvfree(out);
return ret;
}

static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port_num, int index)

@@ -3780,7 +3760,12 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
num_counters = port->cnts.num_q_counters;

if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
ret = mlx5_ib_query_cong_counters(dev, port, stats);
ret = mlx5_lag_query_cong_counters(dev->mdev,
stats->value +
port->cnts.num_q_counters,
port->cnts.num_cong_counters,
port->cnts.offsets +
port->cnts.num_q_counters);
if (ret)
return ret;
num_counters += port->cnts.num_cong_counters;
@@ -115,6 +115,8 @@ enum {
struct mlx5_ib_vma_private_data {
struct list_head list;
struct vm_area_struct *vma;
/* protect vma_private_list add/del */
struct mutex *vma_private_list_mutex;
};

struct mlx5_ib_ucontext {

@@ -129,6 +131,8 @@ struct mlx5_ib_ucontext {
/* Transport Domain number */
u32 tdn;
struct list_head vma_private_list;
/* protect vma_private_list add/del */
struct mutex vma_private_list_mutex;

unsigned long upd_xlt_page;
/* protect ODP/KSM */

@@ -1640,6 +1640,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
MLX5_SET(mkc, mkc, umr_en, 1);

mr->ibmr.device = pd->device;
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err)
goto err_destroy_psv;
@@ -93,8 +93,8 @@ struct pvrdma_cq {
struct pvrdma_page_dir pdir;
u32 cq_handle;
bool is_kernel;
atomic_t refcnt;
wait_queue_head_t wait;
refcount_t refcnt;
struct completion free;
};

struct pvrdma_id_table {

@@ -175,7 +175,7 @@ struct pvrdma_srq {
u32 srq_handle;
int npages;
refcount_t refcnt;
wait_queue_head_t wait;
struct completion free;
};

struct pvrdma_qp {

@@ -196,8 +196,8 @@ struct pvrdma_qp {
u8 state;
bool is_kernel;
struct mutex mutex; /* QP state mutex. */
atomic_t refcnt;
wait_queue_head_t wait;
refcount_t refcnt;
struct completion free;
};

struct pvrdma_dev {
@@ -132,8 +132,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}

cq->ibcq.cqe = entries;
cq->is_kernel = !context;

if (context) {
if (!cq->is_kernel) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
ret = -EFAULT;
goto err_cq;

@@ -148,8 +149,6 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,

npages = ib_umem_page_count(cq->umem);
} else {
cq->is_kernel = true;

/* One extra page for shared ring state */
npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
PAGE_SIZE - 1) / PAGE_SIZE;

@@ -178,8 +177,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
else
pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);

atomic_set(&cq->refcnt, 1);
init_waitqueue_head(&cq->wait);
refcount_set(&cq->refcnt, 1);
init_completion(&cq->free);
spin_lock_init(&cq->cq_lock);

memset(cmd, 0, sizeof(*cmd));

@@ -202,7 +201,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

if (context) {
if (!cq->is_kernel) {
cq->uar = &(to_vucontext(context)->uar);

/* Copy udata back. */

@@ -219,7 +218,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
err_page_dir:
pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
if (context)
if (!cq->is_kernel)
ib_umem_release(cq->umem);
err_cq:
atomic_dec(&dev->num_cqs);

@@ -230,8 +229,9 @@ err_cq:

static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
atomic_dec(&cq->refcnt);
wait_event(cq->wait, !atomic_read(&cq->refcnt));
if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
wait_for_completion(&cq->free);

if (!cq->is_kernel)
ib_umem_release(cq->umem);
@@ -243,13 +243,13 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
mutex_init(&dev->port_mutex);
spin_lock_init(&dev->desc_lock);

dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
GFP_KERNEL);
if (!dev->cq_tbl)
return ret;
spin_lock_init(&dev->cq_tbl_lock);

dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
GFP_KERNEL);
if (!dev->qp_tbl)
goto err_cq_free;

@@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
if (qp)
atomic_inc(&qp->refcnt);
refcount_inc(&qp->refcnt);
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

if (qp && qp->ibqp.event_handler) {

@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
ibqp->event_handler(&e, ibqp->qp_context);
}
if (qp) {
atomic_dec(&qp->refcnt);
if (atomic_read(&qp->refcnt) == 0)
wake_up(&qp->wait);
if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
}
}

@@ -360,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
if (cq)
atomic_inc(&cq->refcnt);
refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

if (cq && cq->ibcq.event_handler) {

@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
ibcq->event_handler(&e, ibcq->cq_context);
}
if (cq) {
atomic_dec(&cq->refcnt);
if (atomic_read(&cq->refcnt) == 0)
wake_up(&cq->wait);
if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
}
}

@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
}
if (srq) {
if (refcount_dec_and_test(&srq->refcnt))
wake_up(&srq->wait);
complete(&srq->free);
}
}

@@ -533,15 +531,14 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
if (cq)
atomic_inc(&cq->refcnt);
refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

if (cq && cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
if (cq) {
atomic_dec(&cq->refcnt);
if (atomic_read(&cq->refcnt))
wake_up(&cq->wait);
if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
}
pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
}
@@ -245,12 +245,13 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex);
atomic_set(&qp->refcnt, 1);
init_waitqueue_head(&qp->wait);
refcount_set(&qp->refcnt, 1);
init_completion(&qp->free);

qp->state = IB_QPS_RESET;
qp->is_kernel = !(pd->uobject && udata);

if (pd->uobject && udata) {
if (!qp->is_kernel) {
dev_dbg(&dev->pdev->dev,
"create queuepair from user space\n");

@@ -291,8 +292,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
qp->is_kernel = true;

ret = pvrdma_set_sq_size(to_vdev(pd->device),
&init_attr->cap, qp);
if (ret)

@@ -394,7 +393,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
err_pdir:
pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
if (pd->uobject && udata) {
if (!qp->is_kernel) {
if (qp->rumem)
ib_umem_release(qp->rumem);
if (qp->sumem)

@@ -428,8 +427,16 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)

pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

atomic_dec(&qp->refcnt);
wait_event(qp->wait, !atomic_read(&qp->refcnt));
if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
wait_for_completion(&qp->free);

if (!qp->is_kernel) {
if (qp->rumem)
ib_umem_release(qp->rumem);
if (qp->sumem)
ib_umem_release(qp->sumem);
}

pvrdma_page_dir_cleanup(dev, &qp->pdir);
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,

spin_lock_init(&srq->lock);
refcount_set(&srq->refcnt, 1);
init_waitqueue_head(&srq->wait);
init_completion(&srq->free);

dev_dbg(&dev->pdev->dev,
"create shared receive queue from user space\n");

@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
dev->srq_tbl[srq->srq_handle] = NULL;
spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

refcount_dec(&srq->refcnt);
wait_event(srq->wait, !refcount_read(&srq->refcnt));
if (refcount_dec_and_test(&srq->refcnt))
complete(&srq->free);
wait_for_completion(&srq->free);

/* There is no support for kernel clients, so this is safe. */
ib_umem_release(srq->umem);
@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
noio_flag = memalloc_noio_save();
p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
if (!p->tx_ring) {
memalloc_noio_restore(noio_flag);
ret = -ENOMEM;
goto err_tx;
}
@@ -1235,13 +1235,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev);

if (level == IPOIB_FLUSH_HEAVY) {
rtnl_lock();
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);

result = ipoib_ib_dev_open(dev);
rtnl_unlock();
if (result)
if (ipoib_ib_dev_open(dev))
return;

if (netif_queue_stopped(dev))

@@ -1281,7 +1278,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy);

rtnl_lock();
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
rtnl_unlock();
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
@@ -137,6 +137,17 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
bool reset, void *out, int out_size)
{
u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };

MLX5_SET(query_cong_statistics_in, in, opcode,
MLX5_CMD_OP_QUERY_CONG_STATISTICS);
MLX5_SET(query_cong_statistics_in, in, clear, reset);
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}

static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
{
return dev->priv.lag;

@@ -633,3 +644,48 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
/* If bonded, we do not add an IB device for PF1. */
return false;
}

int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
struct mlx5_lag *ldev;
int num_ports;
int ret, i, j;
void *out;

out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;

memset(values, 0, sizeof(*values) * num_counters);

mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
if (ldev && mlx5_lag_is_bonded(ldev)) {
num_ports = MLX5_MAX_PORTS;
mdev[0] = ldev->pf[0].dev;
mdev[1] = ldev->pf[1].dev;
} else {
num_ports = 1;
mdev[0] = dev;
}

for (i = 0; i < num_ports; ++i) {
ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
if (ret)
goto unlock;

for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
}

unlock:
mutex_unlock(&lag_mutex);
kvfree(out);
return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);
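mlx5_lag_query_cong_counters() above reads the same congestion counters from each bonded port and sums them into one logical view. A stripped-down sketch of that aggregation loop (hypothetical query callback, no mlx5 types):

#include <stdint.h>
#include <stddef.h>

typedef int (*demo_query_fn)(int port, uint64_t *out, size_t n);

static int demo_query_bonded(demo_query_fn query, int nports,
			     uint64_t *values, size_t n)
{
	uint64_t tmp[64];
	size_t j;
	int ret, i;

	if (n > 64)
		return -1;

	for (j = 0; j < n; j++)
		values[j] = 0;			/* start from a clean logical view */

	for (i = 0; i < nports; i++) {
		ret = query(i, tmp, n);		/* raw counters from one underlying device */
		if (ret)
			return ret;
		for (j = 0; j < n; j++)
			values[j] += tmp[j];	/* accumulate per-port values */
	}
	return 0;
}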
@@ -1164,6 +1164,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
@@ -52,12 +52,14 @@
#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */
#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */
#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */
#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */
#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. */
#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */

enum pvrdma_wr_opcode {
PVRDMA_WR_RDMA_WRITE,
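The last hunk drops BIT() from the pvrdma UAPI header because BIT() comes from kernel-internal headers that userspace does not get when it includes the exported ABI file; plain shifts keep the header self-contained. A minimal illustration with hypothetical macro names:

/* Safe in an exported UAPI header: no dependence on kernel-only helpers. */
#define DEMO_UAR_QP_SEND (1 << 30)	/* same value BIT(30) would produce in-kernel */
#define DEMO_UAR_CQ_ARM_SOL (1 << 29)	/* arm-solicited bit, spelled with a plain shift */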