RDMA 5.12 third rc pull request
Several bug fixes: - Regression from the last pull request in cxgb4 related to the ipv6 fixes - KASAN crasher in rtrs - oops in hfi1 related to a buggy BIOS - Userspace could oops qedr's XRC support - Uninitialized memory when parsing a LS_NLA_TYPE_DGID netlink message -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmBvWaMACgkQOG33FX4g mxpuKQ/+LY+nYwY+DGAbg1OyzFzoYn8QQaxO6lVbvPNjHhM8A94B/c6uuSMvZBvn oPaRuIHlcx77szrWpjNCmVM16SjUKRqUohAdMNFqv83Q4yFptIDpdqT6iuud6BXw kTjKQv14TaqnFDyRQ8L48wg+xoifiCdCUt7ajQ+6GEIa+wqjuRUdSoaH/Tg4N5o5 q+gYgbDY0eiZBmP10KtD25nBZjuKV8FkvTLxN6Zod1CuGrBBbnMLfeD/NZMGZQEb pWkcVYBf+iW88LBLtY0X7KaGSVAE2m8Y8mxZgxCg6HFfbxNZN3VVSTLv4BuGPTy4 3Y1TQLlRuejiuzKytS8N+34vQlj2/6O/4amHRGTXV05TyehllHSgzP2ZCQWWy7Os IyelRAnl5sFGLpjcabQ+WZ3tOO84AbhaziEiSka7+/4g2c7GW4WJrOn7KbrlcImL vl2fUoHzwn0ObcEWomHgjCaPSQb+VimrHDXTVMLRd8voxoM8r6KWTDv0Y4fg4nm9 Wym3iYzvaMizsc7v1AotJUXu5kJKV44apr/JnjaTs+RQ2KVOxjx4h/PDFbY06A2r 5v8+9CMgMKqQldqfQALi7lgyx+/pG0xLW3Rx8r30jCePVN1ulPj54onijHHKrd0K RROGDoEHSIxuPrmnTU8a5NM65eTGN6+BGbxwVTFL1QBqRrSl7AY= =UaHF -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma Pull rdma fixes from Jason Gunthorpe: "Nothing very exciting here, just a few small bug fixes. No red flags for this release have shown up. - Regression from the last pull request in cxgb4 related to the ipv6 fixes - KASAN crasher in rtrs - oops in hfi1 related to a buggy BIOS - Userspace could oops qedr's XRC support - Uninitialized memory when parsing a LS_NLA_TYPE_DGID netlink message" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: RDMA/addr: Be strict with gid size RDMA/qedr: Fix kernel panic when trying to access recv_cq IB/hfi1: Fix probe time panic when AIP is enabled with a buggy BIOS RDMA/cxgb4: check for ipv6 address properly while destroying listener RDMA/rtrs-clt: Close rtrs client conn before destroying rtrs clt session files
This commit is contained in:
commit
4fa56ad0d1
|
@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
|
|||
|
||||
static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
|
||||
[LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
|
||||
.len = sizeof(struct rdma_nla_ls_gid)},
|
||||
.len = sizeof(struct rdma_nla_ls_gid),
|
||||
.validation_type = NLA_VALIDATE_MIN,
|
||||
.min = sizeof(struct rdma_nla_ls_gid)},
|
||||
};
|
||||
|
||||
static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
|
||||
|
|
|
@@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
|
|||
c4iw_init_wr_wait(ep->com.wr_waitp);
|
||||
err = cxgb4_remove_server(
|
||||
ep->com.dev->rdev.lldi.ports[0], ep->stid,
|
||||
ep->com.dev->rdev.lldi.rxq_ids[0], true);
|
||||
ep->com.dev->rdev.lldi.rxq_ids[0],
|
||||
ep->com.local_addr.ss_family == AF_INET6);
|
||||
if (err)
|
||||
goto done;
|
||||
err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
|
||||
|
|
|
@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
|
|||
*/
|
||||
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
|
||||
{
|
||||
int node = pcibus_to_node(dd->pcidev->bus);
|
||||
struct hfi1_affinity_node *entry;
|
||||
const struct cpumask *local_mask;
|
||||
int curr_cpu, possible, i, ret;
|
||||
bool new_entry = false;
|
||||
|
||||
/*
|
||||
* If the BIOS does not have the NUMA node information set, select
|
||||
* NUMA 0 so we get consistent performance.
|
||||
*/
|
||||
if (node < 0) {
|
||||
dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
|
||||
node = 0;
|
||||
}
|
||||
dd->node = node;
|
||||
|
||||
local_mask = cpumask_of_node(dd->node);
|
||||
if (cpumask_first(local_mask) >= nr_cpu_ids)
|
||||
local_mask = topology_core_cpumask(0);
|
||||
|
@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
|
|||
* create an entry in the global affinity structure and initialize it.
|
||||
*/
|
||||
if (!entry) {
|
||||
entry = node_affinity_allocate(node);
|
||||
entry = node_affinity_allocate(dd->node);
|
||||
if (!entry) {
|
||||
dd_dev_err(dd,
|
||||
"Unable to allocate global affinity node\n");
|
||||
|
@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
|
|||
if (new_entry)
|
||||
node_affinity_add_tail(entry);
|
||||
|
||||
dd->affinity_entry = entry;
|
||||
mutex_unlock(&node_affinity.lock);
|
||||
|
||||
return 0;
|
||||
|
@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
|
|||
{
|
||||
struct hfi1_affinity_node *entry;
|
||||
|
||||
if (dd->node < 0)
|
||||
return;
|
||||
|
||||
mutex_lock(&node_affinity.lock);
|
||||
if (!dd->affinity_entry)
|
||||
goto unlock;
|
||||
entry = node_affinity_lookup(dd->node);
|
||||
if (!entry)
|
||||
goto unlock;
|
||||
|
@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
|
|||
*/
|
||||
_dev_comp_vect_cpu_mask_clean_up(dd, entry);
|
||||
unlock:
|
||||
dd->affinity_entry = NULL;
|
||||
mutex_unlock(&node_affinity.lock);
|
||||
dd->node = NUMA_NO_NODE;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
|
|||
spinlock_t irq_src_lock;
|
||||
int vnic_num_vports;
|
||||
struct net_device *dummy_netdev;
|
||||
struct hfi1_affinity_node *affinity_entry;
|
||||
|
||||
/* Keeps track of IPoIB RSM rule users */
|
||||
atomic_t ipoib_rsm_usr_num;
|
||||
|
|
|
@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
|
|||
dd->pport = (struct hfi1_pportdata *)(dd + 1);
|
||||
dd->pcidev = pdev;
|
||||
pci_set_drvdata(pdev, dd);
|
||||
dd->node = NUMA_NO_NODE;
|
||||
|
||||
ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
|
||||
GFP_KERNEL);
|
||||
|
@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
|
|||
goto bail;
|
||||
}
|
||||
rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
|
||||
/*
|
||||
* If the BIOS does not have the NUMA node information set, select
|
||||
* NUMA 0 so we get consistent performance.
|
||||
*/
|
||||
dd->node = pcibus_to_node(pdev->bus);
|
||||
if (dd->node == NUMA_NO_NODE) {
|
||||
dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
|
||||
dd->node = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize all locks for the device. This needs to be as early as
|
||||
|
|
|
@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
|
|||
return 0;
|
||||
}
|
||||
|
||||
cpumask_and(node_cpu_mask, cpu_mask,
|
||||
cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
|
||||
cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
|
||||
|
||||
available_cpus = cpumask_weight(node_cpu_mask);
|
||||
|
||||
|
|
|
@@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
|
|||
* TGT QP isn't associated with RQ/SQ
|
||||
*/
|
||||
if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
|
||||
(attrs->qp_type != IB_QPT_XRC_TGT)) {
|
||||
(attrs->qp_type != IB_QPT_XRC_TGT) &&
|
||||
(attrs->qp_type != IB_QPT_XRC_INI)) {
|
||||
struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
|
||||
struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
|
||||
|
||||
|
|
|
@@ -2720,8 +2720,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
|
|||
|
||||
/* Now it is safe to iterate over all paths without locks */
|
||||
list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
|
||||
rtrs_clt_destroy_sess_files(sess, NULL);
|
||||
rtrs_clt_close_conns(sess, true);
|
||||
rtrs_clt_destroy_sess_files(sess, NULL);
|
||||
kobject_put(&sess->kobj);
|
||||
}
|
||||
free_clt(clt);
|
||||
|
|
Loading…
Reference in New Issue