v6.5 first rc RDMA pull request
Several smaller driver fixes and a core RDMA CM regression fix:

- Fix improperly accepting flags from userspace in mlx4
- Add missing DMA barriers for irdma
- Fix two kcsan warnings in irdma
- Report the correct CQ op code to userspace in irdma
- Report the correct MW bind error code for irdma
- Load the destination address in RDMA CM to resolve a recent regression
- Fix a QP regression in mthca
- Remove a race processing completions in bnxt_re resulting in a crash
- Fix driver unloading races with interrupts and tasklets in bnxt_re
- Fix missing error unwind in rxe
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQRRRCHOFoQz/8F5bUaFwuHvBreFYQUCZMQLoQAKCRCFwuHvBreF
YQTWAQDWpBY7DEmi7AsJkjNg+ZSmxIIaSfWeEVgU9GQPBnTWlgD/eZrXUjVRkNXc
ZNMB6CjfQOy2XfdQm39kBM+kYnmYCgc=
=jNMH
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Several smaller driver fixes and a core RDMA CM regression fix:

   - Fix improperly accepting flags from userspace in mlx4

   - Add missing DMA barriers for irdma

   - Fix two kcsan warnings in irdma

   - Report the correct CQ op code to userspace in irdma

   - Report the correct MW bind error code for irdma

   - Load the destination address in RDMA CM to resolve a recent regression

   - Fix a QP regression in mthca

   - Remove a race processing completions in bnxt_re resulting in a crash

   - Fix driver unloading races with interrupts and tasklets in bnxt_re

   - Fix missing error unwind in rxe"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/irdma: Report correct WC error
  RDMA/irdma: Fix op_type reporting in CQEs
  RDMA/rxe: Fix an error handling path in rxe_bind_mw()
  RDMA/bnxt_re: Fix hang during driver unload
  RDMA/bnxt_re: Prevent handling any completions after qp destroy
  RDMA/mthca: Fix crash when polling CQ for shared QPs
  RDMA/core: Update CMA destination address on rdma_resolve_addr
  RDMA/irdma: Fix data race on CQP request done
  RDMA/irdma: Fix data race on CQP completion stats
  RDMA/irdma: Add missing read barriers
  RDMA/mlx4: Make check for invalid flags stricter
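The two irdma kcsan items above come down to annotating a completion flag that is written from interrupt/tasklet context and read by a sleeping waiter, as the WRITE_ONCE()/READ_ONCE() changes in the diff below show. A minimal kernel-style sketch of that pattern follows; the names (demo_request, demo_complete, demo_wait) are hypothetical and not taken from the driver.

/* Sketch only: hypothetical structure and helpers, not the irdma driver's own. */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/compiler.h>

struct demo_request {
        wait_queue_head_t waitq;
        bool done;      /* written in tasklet/IRQ context, read by a sleeping waiter */
};

static void demo_request_init(struct demo_request *req)
{
        init_waitqueue_head(&req->waitq);
        req->done = false;
}

/* Completion side (tasklet/IRQ bottom half): publish the flag, then wake the waiter. */
static void demo_complete(struct demo_request *req)
{
        WRITE_ONCE(req->done, true);
        wake_up(&req->waitq);
}

/* Waiting side: read the flag through READ_ONCE() inside the wait condition. */
static int demo_wait(struct demo_request *req, unsigned int timeout_ms)
{
        if (!wait_event_timeout(req->waitq, READ_ONCE(req->done),
                                msecs_to_jiffies(timeout_ms)))
                return -ETIMEDOUT;
        return 0;
}

The annotations do not add ordering by themselves; wake_up()/wait_event_timeout() still provide the synchronization. Marking the plain concurrent accesses with READ_ONCE()/WRITE_ONCE() is what stops KCSAN from reporting them as a data race.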
commit c06f9091a2
@@ -4062,6 +4062,8 @@ static int resolve_prepare_src(struct rdma_id_private *id_priv,
                                            RDMA_CM_ADDR_QUERY)))
                         return -EINVAL;

+       } else {
+               memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
        }

        if (cma_family(id_priv) != dst_addr->sa_family) {
@@ -869,7 +869,10 @@ fail:
 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+       struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
        struct bnxt_re_dev *rdev = qp->rdev;
+       struct bnxt_qplib_nq *scq_nq = NULL;
+       struct bnxt_qplib_nq *rcq_nq = NULL;
        unsigned int flags;
        int rc;

@@ -903,6 +906,15 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
        ib_umem_release(qp->rumem);
        ib_umem_release(qp->sumem);

+       /* Flush all the entries of notification queue associated with
+        * given qp.
+        */
+       scq_nq = qplib_qp->scq->nq;
+       rcq_nq = qplib_qp->rcq->nq;
+       bnxt_re_synchronize_nq(scq_nq);
+       if (scq_nq != rcq_nq)
+               bnxt_re_synchronize_nq(rcq_nq);
+
        return 0;
 }
@@ -381,6 +381,24 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
        spin_unlock_bh(&hwq->lock);
 }

+/* bnxt_re_synchronize_nq - self polling notification queue.
+ * @nq - notification queue pointer
+ *
+ * This function will start polling entries of a given notification queue
+ * for all pending entries.
+ * This function is useful to synchronize notification entries while resources
+ * are going away.
+ */
+
+void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
+{
+       int budget = nq->budget;
+
+       nq->budget = nq->hwq.max_elements;
+       bnxt_qplib_service_nq(&nq->nq_tasklet);
+       nq->budget = budget;
+}
+
 static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
 {
        struct bnxt_qplib_nq *nq = dev_instance;
@@ -402,19 +420,19 @@ void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
        if (!nq->requested)
                return;

-       tasklet_disable(&nq->nq_tasklet);
+       nq->requested = false;
        /* Mask h/w interrupt */
        bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
        /* Sync with last running IRQ handler */
        synchronize_irq(nq->msix_vec);
-       if (kill)
-               tasklet_kill(&nq->nq_tasklet);
-
        irq_set_affinity_hint(nq->msix_vec, NULL);
        free_irq(nq->msix_vec, nq);
        kfree(nq->name);
        nq->name = NULL;
-       nq->requested = false;
+
+       if (kill)
+               tasklet_kill(&nq->nq_tasklet);
+       tasklet_disable(&nq->nq_tasklet);
 }

 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
@@ -553,6 +553,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
                                   struct bnxt_qplib_cqe *cqe,
                                   int num_cqes);
 void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
+void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq);

 static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx)
 {
@@ -989,19 +989,18 @@ void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
        if (!creq->requested)
                return;

-       tasklet_disable(&creq->creq_tasklet);
+       creq->requested = false;
        /* Mask h/w interrupts */
        bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
        /* Sync with last running IRQ-handler */
        synchronize_irq(creq->msix_vec);
-       if (kill)
-               tasklet_kill(&creq->creq_tasklet);
-
        free_irq(creq->msix_vec, rcfw);
        kfree(creq->irq_name);
        creq->irq_name = NULL;
-       creq->requested = false;
        atomic_set(&rcfw->rcfw_intr_enabled, 0);
+       if (kill)
+               tasklet_kill(&creq->creq_tasklet);
+       tasklet_disable(&creq->creq_tasklet);
 }

 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
@@ -2712,13 +2712,13 @@ static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
  */
 void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
 {
-       if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
-               timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+       u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
+
+       if (timeout->compl_cqp_cmds != completed_ops) {
+               timeout->compl_cqp_cmds = completed_ops;
                timeout->count = 0;
-       } else {
-               if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
-                   timeout->compl_cqp_cmds)
-                       timeout->count++;
+       } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
+               timeout->count++;
        }
 }

@@ -2761,7 +2761,7 @@ static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
                if (newtail != tail) {
                        /* SUCCESS */
                        IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
-                       cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+                       atomic64_inc(&cqp->completed_ops);
                        return 0;
                }
                udelay(cqp->dev->hw_attrs.max_sleep_count);
@@ -3121,8 +3121,8 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
        info->dev->cqp = cqp;

        IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
-       cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
-       cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
+       cqp->requested_ops = 0;
+       atomic64_set(&cqp->completed_ops, 0);
        /* for the cqp commands backlog. */
        INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);

@@ -3274,7 +3274,7 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch
        if (ret_code)
                return NULL;

-       cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
+       cqp->requested_ops++;
        if (!*wqe_idx)
                cqp->polarity = !cqp->polarity;
        wqe = cqp->sq_base[*wqe_idx].elem;
@@ -3363,6 +3363,9 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
        if (polarity != ccq->cq_uk.polarity)
                return -ENOENT;

+       /* Ensure CEQE contents are read after valid bit is checked */
+       dma_rmb();
+
        get_64bit_val(cqe, 8, &qp_ctx);
        cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
        info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
@@ -3397,7 +3400,7 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
        dma_wmb(); /* make sure shadow area is updated before moving tail */

        IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
-       ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
+       atomic64_inc(&cqp->completed_ops);

        return ret_code;
 }
@@ -4009,13 +4012,17 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
        u8 polarity;

        aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
-       get_64bit_val(aeqe, 0, &compl_ctx);
        get_64bit_val(aeqe, 8, &temp);
        polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);

        if (aeq->polarity != polarity)
                return -ENOENT;

+       /* Ensure AEQE contents are read after valid bit is checked */
+       dma_rmb();
+
+       get_64bit_val(aeqe, 0, &compl_ctx);
+
        print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
                             aeqe, 16, false);

@@ -191,32 +191,30 @@ enum irdma_cqp_op_type {
        IRDMA_OP_MANAGE_VF_PBLE_BP = 25,
        IRDMA_OP_QUERY_FPM_VAL = 26,
        IRDMA_OP_COMMIT_FPM_VAL = 27,
-       IRDMA_OP_REQ_CMDS = 28,
-       IRDMA_OP_CMPL_CMDS = 29,
-       IRDMA_OP_AH_CREATE = 30,
-       IRDMA_OP_AH_MODIFY = 31,
-       IRDMA_OP_AH_DESTROY = 32,
-       IRDMA_OP_MC_CREATE = 33,
-       IRDMA_OP_MC_DESTROY = 34,
-       IRDMA_OP_MC_MODIFY = 35,
-       IRDMA_OP_STATS_ALLOCATE = 36,
-       IRDMA_OP_STATS_FREE = 37,
-       IRDMA_OP_STATS_GATHER = 38,
-       IRDMA_OP_WS_ADD_NODE = 39,
-       IRDMA_OP_WS_MODIFY_NODE = 40,
-       IRDMA_OP_WS_DELETE_NODE = 41,
-       IRDMA_OP_WS_FAILOVER_START = 42,
-       IRDMA_OP_WS_FAILOVER_COMPLETE = 43,
-       IRDMA_OP_SET_UP_MAP = 44,
-       IRDMA_OP_GEN_AE = 45,
-       IRDMA_OP_QUERY_RDMA_FEATURES = 46,
-       IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 47,
-       IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 48,
-       IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 49,
-       IRDMA_OP_CQ_MODIFY = 50,
+       IRDMA_OP_AH_CREATE = 28,
+       IRDMA_OP_AH_MODIFY = 29,
+       IRDMA_OP_AH_DESTROY = 30,
+       IRDMA_OP_MC_CREATE = 31,
+       IRDMA_OP_MC_DESTROY = 32,
+       IRDMA_OP_MC_MODIFY = 33,
+       IRDMA_OP_STATS_ALLOCATE = 34,
+       IRDMA_OP_STATS_FREE = 35,
+       IRDMA_OP_STATS_GATHER = 36,
+       IRDMA_OP_WS_ADD_NODE = 37,
+       IRDMA_OP_WS_MODIFY_NODE = 38,
+       IRDMA_OP_WS_DELETE_NODE = 39,
+       IRDMA_OP_WS_FAILOVER_START = 40,
+       IRDMA_OP_WS_FAILOVER_COMPLETE = 41,
+       IRDMA_OP_SET_UP_MAP = 42,
+       IRDMA_OP_GEN_AE = 43,
+       IRDMA_OP_QUERY_RDMA_FEATURES = 44,
+       IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 45,
+       IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
+       IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
+       IRDMA_OP_CQ_MODIFY = 48,

        /* Must be last entry*/
-       IRDMA_MAX_CQP_OPS = 51,
+       IRDMA_MAX_CQP_OPS = 49,
 };

 /* CQP SQ WQES */
@@ -191,6 +191,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
        case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
        case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+       case IRDMA_AE_AMP_MWBIND_VALID_STAG:
                qp->flush_code = FLUSH_MW_BIND_ERR;
                qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
                break;
@@ -2075,7 +2076,7 @@ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
                        cqp_request->compl_info.error = info.error;

                        if (cqp_request->waiting) {
-                               cqp_request->request_done = true;
+                               WRITE_ONCE(cqp_request->request_done, true);
                                wake_up(&cqp_request->waitq);
                                irdma_put_cqp_request(&rf->cqp, cqp_request);
                        } else {
@@ -161,8 +161,8 @@ struct irdma_cqp_request {
        void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
        void *param;
        struct irdma_cqp_compl_info compl_info;
+       bool request_done; /* READ/WRITE_ONCE macros operate on it */
        bool waiting:1;
-       bool request_done:1;
        bool dynamic:1;
 };

@@ -230,6 +230,9 @@ static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
        if (valid_bit != cq_uk->polarity)
                return -ENOENT;

+       /* Ensure CQE contents are read after valid bit is checked */
+       dma_rmb();
+
        if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
                ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);

@@ -243,6 +246,9 @@ static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
                if (polarity != cq_uk->polarity)
                        return -ENOENT;

+               /* Ensure ext CQE contents are read after ext valid bit is checked */
+               dma_rmb();
+
                IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
                if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
                        cq_uk->polarity = !cq_uk->polarity;
@@ -365,6 +365,8 @@ struct irdma_sc_cqp {
        struct irdma_dcqcn_cc_params dcqcn_params;
        __le64 *host_ctx;
        u64 *scratch_array;
+       u64 requested_ops;
+       atomic64_t completed_ops;
        u32 cqp_id;
        u32 sq_size;
        u32 hw_sq_size;
@@ -1161,7 +1161,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
        }
        wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
        info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
-       info->op_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+       info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);

        if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
                u32 array_idx;
@@ -1527,6 +1527,9 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
                if (polarity != temp)
                        break;

+               /* Ensure CQE contents are read after valid bit is checked */
+               dma_rmb();
+
                get_64bit_val(cqe, 8, &comp_ctx);
                if ((void *)(unsigned long)comp_ctx == q)
                        set_64bit_val(cqe, 8, 0);
@@ -481,7 +481,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
        if (cqp_request->dynamic) {
                kfree(cqp_request);
        } else {
-               cqp_request->request_done = false;
+               WRITE_ONCE(cqp_request->request_done, false);
                cqp_request->callback_fcn = NULL;
                cqp_request->waiting = false;

@@ -515,7 +515,7 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
 {
        if (cqp_request->waiting) {
                cqp_request->compl_info.error = true;
-               cqp_request->request_done = true;
+               WRITE_ONCE(cqp_request->request_done, true);
                wake_up(&cqp_request->waitq);
        }
        wait_event_timeout(cqp->remove_wq,
@@ -567,11 +567,11 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
        bool cqp_error = false;
        int err_code = 0;

-       cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
+       cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
        do {
                irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
                if (wait_event_timeout(cqp_request->waitq,
-                                      cqp_request->request_done,
+                                      READ_ONCE(cqp_request->request_done),
                                       msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
                        break;

@@ -565,15 +565,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
                return (-EOPNOTSUPP);
        }

-       if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
-                                         MLX4_IB_RX_HASH_DST_IPV4 |
-                                         MLX4_IB_RX_HASH_SRC_IPV6 |
-                                         MLX4_IB_RX_HASH_DST_IPV6 |
-                                         MLX4_IB_RX_HASH_SRC_PORT_TCP |
-                                         MLX4_IB_RX_HASH_DST_PORT_TCP |
-                                         MLX4_IB_RX_HASH_SRC_PORT_UDP |
-                                         MLX4_IB_RX_HASH_DST_PORT_UDP |
-                                         MLX4_IB_RX_HASH_INNER)) {
+       if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |
+                                              MLX4_IB_RX_HASH_DST_IPV4 |
+                                              MLX4_IB_RX_HASH_SRC_IPV6 |
+                                              MLX4_IB_RX_HASH_DST_IPV6 |
+                                              MLX4_IB_RX_HASH_SRC_PORT_TCP |
+                                              MLX4_IB_RX_HASH_DST_PORT_TCP |
+                                              MLX4_IB_RX_HASH_SRC_PORT_UDP |
+                                              MLX4_IB_RX_HASH_DST_PORT_UDP |
+                                              MLX4_IB_RX_HASH_INNER)) {
                pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
                         ucmd->rx_hash_fields_mask);
                return (-EOPNOTSUPP);
@@ -1393,7 +1393,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
        if (mthca_array_get(&dev->qp_table.qp, mqpn))
                err = -EBUSY;
        else
-               mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp);
+               mthca_array_set(&dev->qp_table.qp, mqpn, qp);
        spin_unlock_irq(&dev->qp_table.lock);

        if (err)
@@ -199,7 +199,8 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)

        if (access & ~RXE_ACCESS_SUPPORTED_MW) {
                rxe_err_mw(mw, "access %#x not supported", access);
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
+               goto err_drop_mr;
        }

        spin_lock_bh(&mw->lock);