RDMA v5.15 second rc pull request
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Nothing very exciting here, it has been a quiet cycle overall.

  Usual collection of small bug fixes:

   - irdma issues with CQ entries, VLAN completions and a mutex deadlock

   - Incorrect DCT packets in mlx5

   - Userspace triggered overflows in qib

   - Locking error in hfi1

   - Typo in errno value in qib/hfi1

   - Double free in qedr

   - Leak of random kernel memory to userspace with a netlink callback"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/sa_query: Use strscpy_pad instead of memcpy to copy a string
  RDMA/irdma: Do not hold qos mutex twice on QP resume
  RDMA/irdma: Set VLAN in UD work completion correctly
  RDMA/mlx5: Initialize the ODP xarray when creating an ODP MR
  rdma/qedr: Fix crash due to redundant release of device's qp memory
  RDMA/rdmavt: Fix error code in rvt_create_qp()
  IB/hfi1: Fix abba locking issue with sc_disable()
  IB/qib: Protect from buffer overflow in struct qib_user_sdma_pkt fields
  RDMA/mlx5: Set user priority for DCT
  RDMA/irdma: Process extended CQ entries correctly
commit ab2aa486f4
drivers/infiniband/core/sa_query.c
@@ -706,8 +706,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,

         /* Construct the family header first */
         header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
-        memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
-               LS_DEVICE_NAME_MAX);
+        strscpy_pad(header->device_name,
+                    dev_name(&query->port->agent->device->dev),
+                    LS_DEVICE_NAME_MAX);
         header->port_num = query->port->port_num;

         if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
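Why this closes the info leak: memcpy() always copied LS_DEVICE_NAME_MAX bytes even when the device name is shorter, so the bytes sitting after the NUL terminator went out to userspace in the netlink attribute, while strscpy_pad() stops at the source string and zero-fills the remainder of the destination. Below is a minimal userspace sketch of that difference; copy_padded() and the 0xAA fill pattern are illustrative stand-ins, not the kernel implementation.

#include <stdio.h>
#include <string.h>

#define LS_DEVICE_NAME_MAX 64

/* Rough equivalent of the kernel's strscpy_pad(): bounded copy plus zero padding. */
static void copy_padded(char *dst, const char *src, size_t size)
{
        size_t len = strnlen(src, size - 1);

        memcpy(dst, src, len);
        memset(dst + len, 0, size - len);       /* pad instead of leaking stale bytes */
}

int main(void)
{
        char device_name[LS_DEVICE_NAME_MAX];

        memset(device_name, 0xAA, sizeof(device_name)); /* pretend these are stale kernel bytes */
        copy_padded(device_name, "mlx5_0", sizeof(device_name));

        /* A plain memcpy() of LS_DEVICE_NAME_MAX bytes would have left the 0xAA
         * bytes (and copied past the short source string); after padding,
         * everything beyond the name is 0, so nothing stale reaches userspace. */
        for (size_t i = 0; i < sizeof(device_name); i++)
                if (device_name[i] == (char)0xAA)
                        printf("stale byte at offset %zu\n", i);
        return 0;
}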
drivers/infiniband/hw/hfi1/pio.c
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
 {
         u64 reg;
         struct pio_buf *pbuf;
+        LIST_HEAD(wake_list);

         if (!sc)
                 return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
         spin_unlock(&sc->release_lock);

         write_seqlock(&sc->waitlock);
-        while (!list_empty(&sc->piowait)) {
+        if (!list_empty(&sc->piowait))
+                list_move(&sc->piowait, &wake_list);
+        write_sequnlock(&sc->waitlock);
+        while (!list_empty(&wake_list)) {
                 struct iowait *wait;
                 struct rvt_qp *qp;
                 struct hfi1_qp_priv *priv;

-                wait = list_first_entry(&sc->piowait, struct iowait, list);
+                wait = list_first_entry(&wake_list, struct iowait, list);
                 qp = iowait_to_qp(wait);
                 priv = qp->priv;
                 list_del_init(&priv->s_iowait.list);
                 priv->s_iowait.lock = NULL;
                 hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
         }
-        write_sequnlock(&sc->waitlock);

         spin_unlock_irq(&sc->alloc_lock);
 }
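The pattern behind the hfi1 fix: hfi1_qp_wakeup() can take locks in the opposite order to sc_disable(), so instead of waking QPs while holding sc->waitlock, the waiters are first moved to a private wake_list under the lock and woken only after it is released. A rough userspace sketch of that splice-then-wake pattern follows, with a plain pthread mutex and a hand-rolled list standing in for the driver's seqlock and iowait structures (names are illustrative only).

#include <pthread.h>
#include <stdio.h>

struct waiter {
        struct waiter *next;
        int id;
};

static pthread_mutex_t waitlock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *piowait;          /* shared list, protected by waitlock */

static void wake(struct waiter *w)
{
        /* In the driver this is hfi1_qp_wakeup(), which may take other locks,
         * so it must run without waitlock held to avoid ABBA deadlocks. */
        printf("waking %d\n", w->id);
}

static void disable_and_wake(void)
{
        struct waiter *wake_list;

        pthread_mutex_lock(&waitlock);
        wake_list = piowait;            /* splice everything onto a local list */
        piowait = NULL;
        pthread_mutex_unlock(&waitlock);

        for (struct waiter *w = wake_list; w; w = w->next)
                wake(w);                /* no shared lock held here */
}

int main(void)
{
        struct waiter b = { .next = NULL, .id = 2 };
        struct waiter a = { .next = &b, .id = 1 };

        piowait = &a;
        disable_and_wake();
        return 0;
}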
drivers/infiniband/hw/irdma/uk.c
@@ -1092,12 +1092,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
         if (cq->avoid_mem_cflct) {
                 ext_cqe = (__le64 *)((u8 *)cqe + 32);
                 get_64bit_val(ext_cqe, 24, &qword7);
-                polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+                polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
         } else {
                 peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
                 ext_cqe = cq->cq_base[peek_head].buf;
                 get_64bit_val(ext_cqe, 24, &qword7);
-                polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+                polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                 if (!peek_head)
                         polarity ^= 1;
         }
drivers/infiniband/hw/irdma/verbs.c
@@ -3399,9 +3399,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
         }

         if (cq_poll_info->ud_vlan_valid) {
-                entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
-                entry->wc_flags |= IB_WC_WITH_VLAN;
+                u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
                 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+                if (vlan) {
+                        entry->vlan_id = vlan;
+                        entry->wc_flags |= IB_WC_WITH_VLAN;
+                }
         } else {
                 entry->sl = 0;
         }
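For context on the VLAN fix: an 802.1Q tag packs the VLAN ID into its low 12 bits (VLAN_VID_MASK) and the priority into the top 3 bits (above VLAN_PRIO_SHIFT), and a priority-tagged frame carries VID 0, which is why IB_WC_WITH_VLAN is now set only for a non-zero VID while the SL is always reported. A small standalone sketch of that split; the sample tag values are made up.

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK   0x0fff          /* low 12 bits: VLAN ID */
#define VLAN_PRIO_SHIFT 13              /* top 3 bits: priority (PCP) */

int main(void)
{
        /* Made-up tags: prio 1 / VID 5, and a priority-tagged frame (VID 0). */
        uint16_t tags[] = { 0x2005, 0x6000 };

        for (int i = 0; i < 2; i++) {
                uint16_t vid = tags[i] & VLAN_VID_MASK;
                unsigned int prio = tags[i] >> VLAN_PRIO_SHIFT;

                printf("tag 0x%04x: prio %u, %s\n", tags[i], prio,
                       vid ? "VID set, report IB_WC_WITH_VLAN"
                           : "VID 0, priority-tagged, no VLAN flag");
        }
        return 0;
}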
drivers/infiniband/hw/irdma/ws.c
@@ -330,8 +330,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)

                 tc_node->enable = true;
                 ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
-                if (ret)
+                if (ret) {
+                        vsi->unregister_qset(vsi, tc_node);
                         goto reg_err;
+                }
         }
         ibdev_dbg(to_ibdev(vsi->dev),
                   "WS: Using node %d which represents VSI %d TC %d\n",
@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
         }
         goto exit;

+reg_err:
+        irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+        list_del(&tc_node->siblings);
+        irdma_free_node(vsi, tc_node);
 leaf_add_err:
         if (list_empty(&vsi_node->child_list_head)) {
                 if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@@ -369,11 +375,6 @@ vsi_add_err:
 exit:
         mutex_unlock(&vsi->dev->ws_mutex);
         return ret;
-
-reg_err:
-        mutex_unlock(&vsi->dev->ws_mutex);
-        irdma_ws_remove(vsi, user_pri);
-        return ret;
 }

 /**
drivers/infiniband/hw/mlx5/mr.c
@@ -1339,7 +1339,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
                 goto err_2;
         }
         mr->mmkey.type = MLX5_MKEY_MR;
-        mr->desc_size = sizeof(struct mlx5_mtt);
         mr->umem = umem;
         set_mr_fields(dev, mr, umem->length, access_flags);
         kvfree(in);
@@ -1533,6 +1532,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
                 ib_umem_release(&odp->umem);
                 return ERR_CAST(mr);
         }
+        xa_init(&mr->implicit_children);

         odp->private = mr;
         err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
drivers/infiniband/hw/mlx5/qp.c
@@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
                 MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
                 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+                if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+                        MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);

                 err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
                                            MLX5_ST_SZ_BYTES(create_dct_in), out,
drivers/infiniband/hw/qedr/qedr.h
@@ -455,6 +455,7 @@ struct qedr_qp {
         /* synchronization objects used with iwarp ep */
         struct kref refcnt;
         struct completion iwarp_cm_comp;
+        struct completion qp_rel_comp;
         unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
 };

drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
 {
         struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);

-        kfree(qp);
+        complete(&qp->qp_rel_comp);
 }

 static void
drivers/infiniband/hw/qedr/verbs.c
@@ -1357,6 +1357,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                 kref_init(&qp->refcnt);
                 init_completion(&qp->iwarp_cm_comp);
+                init_completion(&qp->qp_rel_comp);
         }

         qp->pd = pd;
@@ -2857,8 +2858,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)

         qedr_free_qp_resources(dev, qp, udata);

-        if (rdma_protocol_iwarp(&dev->ibdev, 1))
+        if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                 qedr_iw_qp_rem_ref(&qp->ibqp);
+                wait_for_completion(&qp->qp_rel_comp);
+        }

         return 0;
 }
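The qedr change switches QP teardown to a wait-for-release pattern: the kref release callback no longer frees the QP, it only completes qp_rel_comp, and qedr_destroy_qp() drops its reference and then waits for that completion, so nothing touches freed memory while another reference is still in flight. A simplified userspace model of that hand-off is sketched below, using a pthread mutex and condition variable in place of the kernel's kref and struct completion; it is not the driver's actual code.

#include <pthread.h>
#include <stdio.h>

struct qp {
        int refcnt;                     /* stand-in for struct kref */
        pthread_mutex_t lock;
        pthread_cond_t qp_rel_comp;     /* stand-in for struct completion */
        int released;
};

static void qp_put(struct qp *qp)
{
        pthread_mutex_lock(&qp->lock);
        if (--qp->refcnt == 0) {
                qp->released = 1;       /* was: kfree(qp) in the release callback */
                pthread_cond_signal(&qp->qp_rel_comp);
        }
        pthread_mutex_unlock(&qp->lock);
}

static void destroy_qp(struct qp *qp)
{
        qp_put(qp);                     /* drop the initial reference */

        pthread_mutex_lock(&qp->lock);
        while (!qp->released)           /* wait_for_completion(&qp->qp_rel_comp) */
                pthread_cond_wait(&qp->qp_rel_comp, &qp->lock);
        pthread_mutex_unlock(&qp->lock);
        printf("last reference gone, safe to free qp now\n");
}

int main(void)
{
        struct qp qp = { .refcnt = 1, .released = 0 };

        pthread_mutex_init(&qp.lock, NULL);
        pthread_cond_init(&qp.qp_rel_comp, NULL);
        destroy_qp(&qp);
        return 0;
}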
drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -602,7 +602,7 @@ done:
 /*
  * How many pages in this iovec element?
  */
-static int qib_user_sdma_num_pages(const struct iovec *iov)
+static size_t qib_user_sdma_num_pages(const struct iovec *iov)
 {
         const unsigned long addr = (unsigned long) iov->iov_base;
         const unsigned long len = iov->iov_len;
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                                    struct qib_user_sdma_queue *pq,
                                    struct qib_user_sdma_pkt *pkt,
-                                   unsigned long addr, int tlen, int npages)
+                                   unsigned long addr, int tlen, size_t npages)
 {
         struct page *pages[8];
         int i, j;
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
         unsigned long idx;

         for (idx = 0; idx < niov; idx++) {
-                const int npages = qib_user_sdma_num_pages(iov + idx);
+                const size_t npages = qib_user_sdma_num_pages(iov + idx);
                 const unsigned long addr = (unsigned long) iov[idx].iov_base;

                 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                 unsigned pktnw;
                 unsigned pktnwc;
                 int nfrags = 0;
-                int npages = 0;
-                int bytes_togo = 0;
+                size_t npages = 0;
+                size_t bytes_togo = 0;
                 int tiddma = 0;
                 int cfur;

@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,

                         npages += qib_user_sdma_num_pages(&iov[idx]);

-                        bytes_togo += slen;
+                        if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
+                            bytes_togo > type_max(typeof(pkt->bytes_togo))) {
+                                ret = -EINVAL;
+                                goto free_pbc;
+                        }
                         pktnwc += slen >> 2;
                         idx++;
                         nfrags++;
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                 }

                 if (frag_size) {
-                        int tidsmsize, n;
-                        size_t pktsize;
+                        size_t tidsmsize, n, pktsize, sz, addrlimit;

                         n = npages*((2*PAGE_SIZE/frag_size)+1);
                         pktsize = struct_size(pkt, addr, n);
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                         else
                                 tidsmsize = 0;

-                        pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+                        if (check_add_overflow(pktsize, tidsmsize, &sz)) {
+                                ret = -EINVAL;
+                                goto free_pbc;
+                        }
+                        pkt = kmalloc(sz, GFP_KERNEL);
                         if (!pkt) {
                                 ret = -ENOMEM;
                                 goto free_pbc;
                         }
                         pkt->largepkt = 1;
                         pkt->frag_size = frag_size;
-                        pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+                        if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
+                                               &addrlimit) ||
+                            addrlimit > type_max(typeof(pkt->addrlimit))) {
+                                ret = -EINVAL;
+                                goto free_pbc;
+                        }
+                        pkt->addrlimit = addrlimit;

                         if (tiddma) {
                                 char *tidsm = (char *)pkt + pktsize;
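The qib hunks guard user-controlled lengths with check_add_overflow() and type_max() before they are accumulated or stored into the narrower struct qib_user_sdma_pkt fields. A userspace sketch of the same idea follows, built on __builtin_add_overflow(), which is what the kernel helper wraps; the UINT16_MAX bound is an illustrative stand-in for type_max(typeof(pkt->bytes_togo)).

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int add_len(size_t *bytes_togo, size_t slen)
{
        /* Reject wrap-around of the accumulator itself... */
        if (__builtin_add_overflow(*bytes_togo, slen, bytes_togo))
                return -1;
        /* ...and totals that would truncate when stored in a u16-like field. */
        if (*bytes_togo > UINT16_MAX)
                return -1;
        return 0;
}

int main(void)
{
        size_t total = 0;

        /* First addition fits; the second would overflow the 16-bit field. */
        printf("add 40000: %s\n", add_len(&total, 40000) ? "rejected" : "ok");
        printf("add 40000: %s\n", add_len(&total, 40000) ? "rejected" : "ok");
        return 0;
}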
drivers/infiniband/sw/rdmavt/qp.c
@@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
         spin_lock(&rdi->n_qps_lock);
         if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
                 spin_unlock(&rdi->n_qps_lock);
-                ret = ENOMEM;
+                ret = -ENOMEM;
                 goto bail_ip;
         }
