RDMA/cxgb4: Disable interrupts in c4iw_ev_dispatch()
Use GFP_ATOMIC in _insert_handle() if interrupts are disabled. Don't panic if we get an abort with no endpoint found; just log a warning. Signed-off-by: Vipul Pandya <vipul@chelsio.com> Signed-off-by: Steve Wise <swise@opengridcomputing.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
This commit is contained in:
parent
2c97478106
commit
4984037bef
|
@ -1362,7 +1362,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
|
||||||
|
|
||||||
ep = lookup_tid(t, tid);
|
ep = lookup_tid(t, tid);
|
||||||
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
|
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
|
||||||
BUG_ON(!ep);
|
if (!ep) {
|
||||||
|
printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
mutex_lock(&ep->com.mutex);
|
mutex_lock(&ep->com.mutex);
|
||||||
switch (ep->com.state) {
|
switch (ep->com.state) {
|
||||||
case ABORTING:
|
case ABORTING:
|
||||||
|
|
|
@ -84,7 +84,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
|
||||||
struct c4iw_qp *qhp;
|
struct c4iw_qp *qhp;
|
||||||
u32 cqid;
|
u32 cqid;
|
||||||
|
|
||||||
spin_lock(&dev->lock);
|
spin_lock_irq(&dev->lock);
|
||||||
qhp = get_qhp(dev, CQE_QPID(err_cqe));
|
qhp = get_qhp(dev, CQE_QPID(err_cqe));
|
||||||
if (!qhp) {
|
if (!qhp) {
|
||||||
printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
|
printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
|
||||||
|
@ -93,7 +93,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
|
||||||
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
|
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
|
||||||
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
|
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
|
||||||
CQE_WRID_LOW(err_cqe));
|
CQE_WRID_LOW(err_cqe));
|
||||||
spin_unlock(&dev->lock);
|
spin_unlock_irq(&dev->lock);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -109,13 +109,13 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
|
||||||
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
|
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
|
||||||
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
|
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
|
||||||
CQE_WRID_LOW(err_cqe));
|
CQE_WRID_LOW(err_cqe));
|
||||||
spin_unlock(&dev->lock);
|
spin_unlock_irq(&dev->lock);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
c4iw_qp_add_ref(&qhp->ibqp);
|
c4iw_qp_add_ref(&qhp->ibqp);
|
||||||
atomic_inc(&chp->refcnt);
|
atomic_inc(&chp->refcnt);
|
||||||
spin_unlock(&dev->lock);
|
spin_unlock_irq(&dev->lock);
|
||||||
|
|
||||||
/* Bad incoming write */
|
/* Bad incoming write */
|
||||||
if (RQ_TYPE(err_cqe) &&
|
if (RQ_TYPE(err_cqe) &&
|
||||||
|
|
|
@ -246,7 +246,7 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
|
||||||
int newid;
|
int newid;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (!idr_pre_get(idr, GFP_KERNEL))
|
if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
if (lock)
|
if (lock)
|
||||||
spin_lock_irq(&rhp->lock);
|
spin_lock_irq(&rhp->lock);
|
||||||
|
|
Loading…
Reference in New Issue