RDMA 5.12 first rc pull request
- Fix corner cases in the rxe reference counting cleanup that are
  causing regressions in blktests for SRP

- Two kdoc fixes so W=1 is clean

- Missing error return in error unwind for mlx5

- Wrong lock type nesting in IB CM

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmBCeEwACgkQOG33FX4g
mxohmQ/7Bnf52Ay59dqdQzc8nDPXMwQ5ejZke+MnYZG3j79UqNQrbEnLJ8Y2BlQU
IkpdTi8zUHm69GH7Ix9m/FsW52XFUuq2OEfGLvZNRUhi7hGEmc6llvfCK9y6a4Mt
VEpyo7vC4QvA1OvMBF3Zv91jylKPJt5U51OuRtbh+6eoS6oVm7ajjOay+USYT/kX
3zftTd6DUg6vpUP5j2WvjocgKUVGsh8GiyA3xC7u7AUTemcxvZ40wB535fLpn1JC
ObU8aiRKw0Ib5JbRukCFFBV13kmA3lnAEy0SktPVhIoeUnF0m1vNtAJrdnHsWPxW
lxtAXA3bN3oINhsEhnBllONKRxlqsUz7KT5bVAehsl8LENNLHhJWp0hwKawKK74K
5MAQ+P6CIW8TLMKeOcYMXBD2UiElc9sdhYsDliM7leclftwOO5gFI4WgTWrQwxvl
m+KewiByT6pzK7oyGvg9w/dl3y7atzy8y/7uMAfI2L6oL43z9yt24hHQTo3Z7UM7
/bbSas/yRbZujB7qE45c6NvwRFQk6mYTK0Is0huMWOLn0Zf0fVcy3um0q+OKQ7At
VS1Z7Fhy+ev1Lk764n5rmVUx8NWDlXkzwX7qk7SnQk7dWQ5lNFbziFDKmkiKgcAa
//oN0I/FbAM4FjhKImRzbXJiLdULxJCqpfQ5VXbUXlfXdpzjabY=
=j5ZU
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Nothing special here, though Bob's regression fixes for rxe would have
  made it before the rc cycle had there not been such strong winter
  weather!

  - Fix corner cases in the rxe reference counting cleanup that are
    causing regressions in blktests for SRP

  - Two kdoc fixes so W=1 is clean

  - Missing error return in error unwind for mlx5

  - Wrong lock type nesting in IB CM"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/rxe: Fix errant WARN_ONCE in rxe_completer()
  RDMA/rxe: Fix extra deref in rxe_rcv_mcast_pkt()
  RDMA/rxe: Fix missed IB reference counting in loopback
  RDMA/uverbs: Fix kernel-doc warning of _uverbs_alloc
  RDMA/mlx5: Set correct kernel-doc identifier
  IB/mlx5: Add missing error code
  RDMA/rxe: Fix missing kconfig dependency on CRYPTO
  RDMA/cm: Fix IRQ restore in ib_send_cm_sidr_rep
This commit is contained in:
commit f3ed4de6cc
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3651,6 +3651,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
 				   struct ib_cm_sidr_rep_param *param)
 {
 	struct ib_mad_send_buf *msg;
+	unsigned long flags;
 	int ret;
 
 	lockdep_assert_held(&cm_id_priv->lock);
@@ -3676,12 +3677,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
 		return ret;
 	}
 	cm_id_priv->id.state = IB_CM_IDLE;
-	spin_lock_irq(&cm.lock);
+	spin_lock_irqsave(&cm.lock, flags);
 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
 		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
 	}
-	spin_unlock_irq(&cm.lock);
+	spin_unlock_irqrestore(&cm.lock, flags);
 	return 0;
 }
 
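
Why the plain _irq variants are wrong here: cm_send_sidr_rep_locked() runs with cm_id_priv->lock already held, and some callers take that lock with spin_lock_irqsave(). spin_unlock_irq() re-enables interrupts unconditionally, clobbering the IRQ state the outer caller saved. A minimal sketch of the rule, with illustrative lock names rather than the CM ones:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);
static DEFINE_SPINLOCK(inner_lock);

static void nested_update(void)
{
	unsigned long flags;

	/* Saves the current IRQ state, then disables IRQs. */
	spin_lock_irqsave(&inner_lock, flags);
	/* ... critical section ... */
	/* Restores the saved state rather than blindly enabling IRQs,
	 * so a caller holding outer_lock with IRQs off stays safe.
	 */
	spin_unlock_irqrestore(&inner_lock, flags);
}

static void caller(void)
{
	unsigned long flags;

	spin_lock_irqsave(&outer_lock, flags);	/* IRQs now off */
	nested_update();			/* must not re-enable IRQs */
	spin_unlock_irqrestore(&outer_lock, flags);
}
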
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -91,7 +91,7 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
 }
 
 /**
- * uverbs_alloc() - Quickly allocate memory for use with a bundle
+ * _uverbs_alloc() - Quickly allocate memory for use with a bundle
  * @bundle: The bundle
  * @size: Number of bytes to allocate
  * @flags: Allocator flags
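
Background for this one-line fix: the name on the opening line of a kernel-doc comment must match the function it documents, and `make W=1` warns when it does not; here the comment still carried the pre-rename name uverbs_alloc(). A generic sketch of the expected shape (my_helper() is a hypothetical name):

/**
 * my_helper() - One-line summary of what the helper does
 * @arg: What the parameter means
 *
 * The name before "()" must match the function definition that follows;
 * a stale name after a rename is exactly what W=1 reports.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int my_helper(int arg)
{
	return 0;
}
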
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2073,8 +2073,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 
 		num_alloc_xa_entries++;
 		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
-		if (!event_sub)
+		if (!event_sub) {
+			err = -ENOMEM;
 			goto err;
+		}
 
 		list_add_tail(&event_sub->event_list, &sub_list);
 		uverbs_uobject_get(&ev_file->uobj);
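
The bug pattern here is a classic goto-unwind slip: the allocation failure jumped to the unwind label without setting err, so the function could return whatever err last held, possibly 0 (success). A hedged sketch of the idiom, with illustrative names rather than the devx.c ones:

#include <linux/list.h>
#include <linux/slab.h>

struct event_sub {
	struct list_head list;
};

static int subscribe_one(struct list_head *sub_list)
{
	struct event_sub *sub;
	int err;

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub) {
		err = -ENOMEM;	/* the assignment the fix adds */
		goto err_unwind;
	}

	list_add_tail(&sub->list, sub_list);
	return 0;

err_unwind:
	/* undo any partially constructed state before returning */
	return err;
}
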
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1082,7 +1082,7 @@ end:
 	return ret ? ret : npages;
 }
 
-/**
+/*
  * Parse a series of data segments for page fault handling.
  *
  * @dev: Pointer to mlx5 IB device
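
This is the inverse of the uverbs fix above: the comment looks like documentation but does not follow the kernel-doc grammar (the opening line names no function), so it must start with /* rather than /**, or the kernel-doc checks run under W=1 complain. Roughly, with a hypothetical documented_fn():

/*
 * Plain comment: free-form text, ignored by scripts/kernel-doc.
 */

/**
 * documented_fn() - a comment opening with two stars is parsed as
 * kernel-doc and must name the function it precedes
 * @arg: and document each parameter in @name: form
 */
static void documented_fn(int arg)
{
}
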
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -4,6 +4,7 @@ config RDMA_RXE
 	depends on INET && PCI && INFINIBAND
 	depends on INFINIBAND_VIRT_DMA
 	select NET_UDP_TUNNEL
+	select CRYPTO
 	select CRYPTO_CRC32
 	help
 	  This driver implements the InfiniBand RDMA transport over
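
Why `select CRYPTO_CRC32` alone was not enough: Kconfig's `select` forces a symbol on without satisfying that symbol's own dependencies. With CONFIG_CRYPTO otherwise disabled, CRYPTO_CRC32 could be enabled against a missing crypto core and the build failed; the conventional cure is to select the parent subsystem too. A sketch with a hypothetical driver entry:

config EXAMPLE_RDMA_DRIVER
	tristate "Example soft-RDMA driver"
	depends on INET
	# "select" ignores the selected symbol's own dependencies,
	# so pull in the crypto core explicitly alongside the algorithm.
	select CRYPTO
	select CRYPTO_CRC32
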
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -547,6 +547,7 @@ int rxe_completer(void *arg)
 	struct sk_buff *skb = NULL;
 	struct rxe_pkt_info *pkt = NULL;
 	enum comp_state state;
+	int ret = 0;
 
 	rxe_add_ref(qp);
 
@@ -554,7 +555,8 @@ int rxe_completer(void *arg)
 	    qp->req.state == QP_STATE_RESET) {
 		rxe_drain_resp_pkts(qp, qp->valid &&
 				    qp->req.state == QP_STATE_ERROR);
-		goto exit;
+		ret = -EAGAIN;
+		goto done;
 	}
 
 	if (qp->comp.timeout) {
@@ -564,8 +566,10 @@ int rxe_completer(void *arg)
 		qp->comp.timeout_retry = 0;
 	}
 
-	if (qp->req.need_retry)
-		goto exit;
+	if (qp->req.need_retry) {
+		ret = -EAGAIN;
+		goto done;
+	}
 
 	state = COMPST_GET_ACK;
 
@@ -636,8 +640,6 @@ int rxe_completer(void *arg)
 			break;
 
 		case COMPST_DONE:
-			if (pkt)
-				free_pkt(pkt);
 			goto done;
 
 		case COMPST_EXIT:
@@ -660,7 +662,8 @@ int rxe_completer(void *arg)
 			    qp->qp_timeout_jiffies)
 				mod_timer(&qp->retrans_timer,
 					  jiffies + qp->qp_timeout_jiffies);
-			goto exit;
+			ret = -EAGAIN;
+			goto done;
 
 		case COMPST_ERROR_RETRY:
 			/* we come here if the retry timer fired and we did
@@ -672,18 +675,18 @@ int rxe_completer(void *arg)
 			 */
 
 			/* there is nothing to retry in this case */
-			if (!wqe || (wqe->state == wqe_state_posted))
-				goto exit;
+			if (!wqe || (wqe->state == wqe_state_posted)) {
+				pr_warn("Retry attempted without a valid wqe\n");
+				ret = -EAGAIN;
+				goto done;
+			}
 
 			/* if we've started a retry, don't start another
 			 * retry sequence, unless this is a timeout.
 			 */
 			if (qp->comp.started_retry &&
-			    !qp->comp.timeout_retry) {
-				if (pkt)
-					free_pkt(pkt);
+			    !qp->comp.timeout_retry)
 				goto done;
-			}
 
 			if (qp->comp.retry_cnt > 0) {
 				if (qp->comp.retry_cnt != 7)
@@ -704,8 +707,6 @@ int rxe_completer(void *arg)
 					qp->comp.started_retry = 1;
 					rxe_run_task(&qp->req.task, 0);
 				}
-				if (pkt)
-					free_pkt(pkt);
 				goto done;
 
 			} else {
@@ -726,8 +727,8 @@ int rxe_completer(void *arg)
 				mod_timer(&qp->rnr_nak_timer,
 					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
 						& ~AETH_TYPE_MASK));
-				free_pkt(pkt);
-				goto exit;
+				ret = -EAGAIN;
+				goto done;
 			} else {
 				rxe_counter_inc(rxe,
 						RXE_CNT_RNR_RETRY_EXCEEDED);
@@ -740,25 +741,15 @@ int rxe_completer(void *arg)
 			WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
 			do_complete(qp, wqe);
 			rxe_qp_error(qp);
-			if (pkt)
-				free_pkt(pkt);
-			goto exit;
+			ret = -EAGAIN;
+			goto done;
 		}
 	}
 
-exit:
-	/* we come here if we are done with processing and want the task to
-	 * exit from the loop calling us
-	 */
-	WARN_ON_ONCE(skb);
-	rxe_drop_ref(qp);
-	return -EAGAIN;
-
 done:
-	/* we come here if we have processed a packet we want the task to call
-	 * us again to see if there is anything else to do
-	 */
-	WARN_ON_ONCE(skb);
+	if (pkt)
+		free_pkt(pkt);
 	rxe_drop_ref(qp);
-	return 0;
+
+	return ret;
 }
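
The thread running through all of these hunks: rxe_completer() used to have two exit labels, with each early-out freeing pkt itself and the exit: path carrying a WARN_ON_ONCE that could fire spuriously. The rework keeps a single done: label that owns all cleanup, with ret distinguishing "call me again" (0) from "stop the task loop" (-EAGAIN). A condensed sketch of that single-exit shape, with illustrative types rather than the rxe structures:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct pkt {
	int opcode;
};

static int completer_step(struct pkt *pkt, bool draining)
{
	int ret = 0;

	if (draining) {
		ret = -EAGAIN;	/* tell the task loop to exit */
		goto done;
	}

	/* ... normal ack processing would go here ... */

done:
	kfree(pkt);	/* cleanup runs exactly once, on every path */
	return ret;
}

With one owner for the cleanup, no path can leak or double-free the packet, and the return value alone carries the control-flow decision.
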
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -407,14 +407,22 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
 	return 0;
 }
 
+/* fix up a send packet to match the packets
+ * received from UDP before looping them back
+ */
 void rxe_loopback(struct sk_buff *skb)
 {
+	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
 	if (skb->protocol == htons(ETH_P_IP))
 		skb_pull(skb, sizeof(struct iphdr));
 	else
 		skb_pull(skb, sizeof(struct ipv6hdr));
 
-	rxe_rcv(skb);
+	if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev)))
+		kfree_skb(skb);
+	else
+		rxe_rcv(skb);
 }
 
 struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
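
The loopback path now does what the network receive path already did: hold a reference on the ib_device for every in-flight packet so the device cannot be unregistered underneath it, and drop the packet when the reference cannot be taken. ib_device_try_get() follows the usual try-get idiom, which looks roughly like this generic sketch (not the IB core implementation):

#include <linux/refcount.h>
#include <linux/types.h>

struct obj {
	refcount_t ref;
};

/* Succeeds only while at least one other reference is still live;
 * returns false once the count has already dropped to zero.
 */
static bool obj_try_get(struct obj *o)
{
	return refcount_inc_not_zero(&o->ref);
}

The WARN_ON() in the fix marks a failed try-get as a should-never-happen ordering bug rather than a normal runtime condition.
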
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -237,8 +237,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	struct rxe_mc_elem *mce;
 	struct rxe_qp *qp;
 	union ib_gid dgid;
-	struct sk_buff *per_qp_skb;
-	struct rxe_pkt_info *per_qp_pkt;
 	int err;
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -250,10 +248,15 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 	/* lookup mcast group corresponding to mgid, takes a ref */
 	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
 	if (!mcg)
-		goto err1;	/* mcast group not registered */
+		goto drop;	/* mcast group not registered */
 
 	spin_lock_bh(&mcg->mcg_lock);
 
+	/* this is unreliable datagram service so we let
+	 * failures to deliver a multicast packet to a
+	 * single QP happen and just move on and try
+	 * the rest of them on the list
+	 */
 	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
 		qp = mce->qp;
 
@@ -266,39 +269,47 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
 		if (err)
 			continue;
 
-		/* for all but the last qp create a new clone of the
-		 * skb and pass to the qp. If an error occurs in the
-		 * checks for the last qp in the list we need to
-		 * free the skb since it hasn't been passed on to
-		 * rxe_rcv_pkt() which would free it later.
+		/* for all but the last QP create a new clone of the
+		 * skb and pass to the QP. Pass the original skb to
+		 * the last QP in the list.
 		 */
 		if (mce->qp_list.next != &mcg->qp_list) {
-			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
-			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
-				kfree_skb(per_qp_skb);
+			struct sk_buff *cskb;
+			struct rxe_pkt_info *cpkt;
+
+			cskb = skb_clone(skb, GFP_ATOMIC);
+			if (unlikely(!cskb))
 				continue;
+
+			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+				kfree_skb(cskb);
+				break;
 			}
+
+			cpkt = SKB_TO_PKT(cskb);
+			cpkt->qp = qp;
+			rxe_add_ref(qp);
+			rxe_rcv_pkt(cpkt, cskb);
 		} else {
-			per_qp_skb = skb;
-			/* show we have consumed the skb */
-			skb = NULL;
+			pkt->qp = qp;
+			rxe_add_ref(qp);
+			rxe_rcv_pkt(pkt, skb);
+			skb = NULL;	/* mark consumed */
 		}
-
-		if (unlikely(!per_qp_skb))
-			continue;
-
-		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
-		per_qp_pkt->qp = qp;
-		rxe_add_ref(qp);
-		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
 	}
 
 	spin_unlock_bh(&mcg->mcg_lock);
 
 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
 
-err1:
+	/* free skb if not consumed */
+	if (likely(!skb))
+		return;
+
+	/* This only occurs if one of the checks fails on the last
+	 * QP in the list above
+	 */
+
+drop:
 	kfree_skb(skb);
 	ib_device_put(&rxe->ib_dev);
 }
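
The ownership rule this rewrite enforces: every QP on the multicast list gets its own skb, a clone for all but the last and the original for the last, and setting skb to NULL marks the original as consumed so the shared drop path cannot double-free it. A simplified sketch of that fan-out, with a hypothetical deliver() standing in for the rxe receive path:

#include <linux/gfp.h>
#include <linux/skbuff.h>

static void deliver(struct sk_buff *skb)
{
	kfree_skb(skb);		/* consumes the skb */
}

static void fan_out(struct sk_buff *skb, int nr_receivers)
{
	int i;

	for (i = 0; i < nr_receivers; i++) {
		if (i < nr_receivers - 1) {
			struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);

			if (!cskb)
				continue;	/* best effort: skip this one */
			deliver(cskb);
		} else {
			deliver(skb);
			skb = NULL;	/* original consumed */
		}
	}

	kfree_skb(skb);	/* no-op when consumed: kfree_skb(NULL) is safe */
}

Handing the original to the last receiver saves one clone per packet, and the NULL sentinel keeps the cleanup path to a single unconditional free.
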