Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Here is the first rc pull request for RDMA. This includes an
  important core fix for a regression in iWarp if SELinux is enabled, a
  fix for a compilation regression introduced in this merge window, and
  one obscure kconfig combination that oopses the kernel. For drivers,
  we have hns fixes needed to make their devices work on certain ARM
  IOMMU configurations, a stack data leak for hfi1, and various
  testing-discovered -rc bug fixes for i40iw. This cycle we pushed back
  on the driver maintainers to have better commit messages for -rc
  material"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/core: Only enforce security for InfiniBand
  RDMA/hns: Get rid of page operation after dma_alloc_coherent
  RDMA/hns: Get rid of virt_to_page and vmap calls after dma_alloc_coherent
  RDMA/hns: Fix the issue of IOVA not page continuous in hip08
  IB/core: Init subsys if compiled to vmlinuz-core
  RDMA/cma: Make sure that PSN is not over max allowed
  i40iw: Notify user of established connection after QP in RTS
  i40iw: Move MPA request event for loopback after connect
  i40iw: Correct ARP index mask
  i40iw: Do not free sqbuf when event is I40IW_TIMER_TYPE_CLOSE
  i40iw: Allocate a sdbuf per CQP WQE
  IB: INFINIBAND should depend on HAS_DMA
  IB/hfi1: Initialize bth1 in 16B rc ack builder
commit e6cdd80a83
@@ -1,6 +1,6 @@
 menuconfig INFINIBAND
 	tristate "InfiniBand support"
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && HAS_DMA
 	depends on NET
 	depends on INET
 	depends on m || IPV6 != m

@@ -801,6 +801,7 @@ struct rdma_cm_id *rdma_create_id(struct net *net,
 	INIT_LIST_HEAD(&id_priv->mc_list);
 	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
 	id_priv->id.route.addr.dev_addr.net = get_net(net);
+	id_priv->seq_num &= 0x00ffffff;
 
 	return &id_priv->id;
 }

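The one-line cma.c change above matters because an InfiniBand packet sequence number is a 24-bit field: a PSN seeded from get_random_bytes() can land above 0xffffff and be rejected by the remote side during connection establishment. A minimal standalone sketch of the same masking (clamp_psn is a hypothetical helper for illustration, not a kernel function):

	#include <stdint.h>

	/* Keep a randomly seeded PSN inside the 24-bit range that the
	 * wire format allows; mirrors the fix above. */
	static uint32_t clamp_psn(uint32_t seeded)
	{
		return seeded & 0x00ffffff;
	}
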
@@ -1253,5 +1253,5 @@ static void __exit ib_core_cleanup(void)
 
 MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
 
-module_init(ib_core_init);
+subsys_initcall(ib_core_init);
 module_exit(ib_core_cleanup);

@@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
 
 int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
 {
+	u8 i = rdma_start_port(dev);
+	bool is_ib = false;
 	int ret;
 
+	while (i <= rdma_end_port(dev) && !is_ib)
+		is_ib = rdma_protocol_ib(dev, i++);
+
+	/* If this isn't an IB device don't create the security context */
+	if (!is_ib)
+		return 0;
+
 	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
 	if (!qp->qp_sec)
 		return -ENOMEM;
@@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security);
 
 void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
 {
+	/* Return if not IB */
+	if (!sec)
+		return;
+
 	mutex_lock(&sec->mutex);
 
 	/* Remove the QP from the lists so it won't get added to
@@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
 	int ret;
 	int i;
 
+	/* Return if not IB */
+	if (!sec)
+		return;
+
 	/* If a concurrent cache update is in progress this
 	 * QP security could be marked for an error state
 	 * transition. Wait for this to complete.
@@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
 {
 	int i;
 
+	/* Return if not IB */
+	if (!sec)
+		return;
+
 	/* If a concurrent cache update is occurring we must
 	 * wait until this QP security structure is processed
 	 * in the QP to error flow before destroying it because
@@ -557,7 +578,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
 {
 	int ret = 0;
 	struct ib_ports_pkeys *tmp_pps;
-	struct ib_ports_pkeys *new_pps;
+	struct ib_ports_pkeys *new_pps = NULL;
 	struct ib_qp *real_qp = qp->real_qp;
 	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
 			   real_qp->qp_type == IB_QPT_GSI ||
@@ -565,18 +586,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
 	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
 			   (qp_attr_mask & IB_QP_ALT_PATH));
 
+	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
+		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
+		   !real_qp->qp_sec),
+		  "%s: QP security is not initialized for IB QP: %d\n",
+		  __func__, real_qp->qp_num);
+
 	/* The port/pkey settings are maintained only for the real QP. Open
 	 * handles on the real QP will be in the shared_qp_list. When
 	 * enforcing security on the real QP all the shared QPs will be
 	 * checked as well.
 	 */
 
-	if (pps_change && !special_qp) {
+	if (pps_change && !special_qp && real_qp->qp_sec) {
 		mutex_lock(&real_qp->qp_sec->mutex);
 		new_pps = get_new_pps(real_qp,
 				      qp_attr,
 				      qp_attr_mask);
-
+		if (!new_pps) {
+			mutex_unlock(&real_qp->qp_sec->mutex);
+			return -ENOMEM;
+		}
 		/* Add this QP to the lists for the new port
 		 * and pkey settings before checking for permission
 		 * in case there is a concurrent cache update
@@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
 					  qp_attr_mask,
 					  udata);
 
-	if (pps_change && !special_qp) {
+	if (new_pps) {
 		/* Clean up the lists and free the appropriate
 		 * ports_pkeys structure.
 		 */
@@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev,
 	u16 pkey;
 	int ret;
 
+	if (!rdma_protocol_ib(dev, port_num))
+		return 0;
+
 	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
 	if (ret)
 		return ret;
@@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 {
 	int ret;
 
+	if (!rdma_protocol_ib(agent->device, agent->port_num))
+		return 0;
+
 	ret = security_ib_alloc_security(&agent->security);
 	if (ret)
 		return ret;
@@ -690,6 +726,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 
 void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 {
+	if (!rdma_protocol_ib(agent->device, agent->port_num))
+		return;
+
 	security_ib_free_security(agent->security);
 	if (agent->lsm_nb_reg)
 		unregister_lsm_notifier(&agent->lsm_nb);
@@ -697,6 +736,9 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 
 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
 {
+	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
+		return 0;
+
 	if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
 		return -EACCES;
 

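All of the security.c hunks above apply the same guard: probe whether any port of the device runs the InfiniBand protocol and bail out early otherwise, so non-InfiniBand devices (such as iWarp) never allocate or dereference a QP security context and qp_sec stays NULL. A sketch of that probe, paraphrased from the ib_create_qp_security() hunk above (device_has_ib_port is a hypothetical name for illustration):

	/* True if at least one port of the device speaks InfiniBand. */
	static bool device_has_ib_port(struct ib_device *dev)
	{
		u8 i = rdma_start_port(dev);
		bool is_ib = false;

		while (i <= rdma_end_port(dev) && !is_ib)
			is_ib = rdma_protocol_ib(dev, i++);

		return is_ib;
	}
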
@@ -814,7 +814,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 	struct hfi1_16b_header *hdr = &opa_hdr->opah;
 	struct ib_other_headers *ohdr;
-	u32 bth0, bth1;
+	u32 bth0, bth1 = 0;
 	u16 len, pkey;
 	u8 becn = !!is_fecn;
 	u8 l4 = OPA_16B_L4_IB_LOCAL;

@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 {
 	int i;
 	struct device *dev = hr_dev->dev;
-	u32 bits_per_long = BITS_PER_LONG;
 
 	if (buf->nbufs == 1) {
 		dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
 	} else {
-		if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
-			vunmap(buf->direct.buf);
-
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->page_list[i].buf)
 				dma_free_coherent(dev, 1 << buf->page_shift,
@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 {
 	int i = 0;
 	dma_addr_t t;
-	struct page **pages;
 	struct device *dev = hr_dev->dev;
-	u32 bits_per_long = BITS_PER_LONG;
 	u32 page_size = 1 << page_shift;
 	u32 order;
 
@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 			buf->page_list[i].map = t;
 			memset(buf->page_list[i].buf, 0, page_size);
 		}
-		if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
-			pages = kmalloc_array(buf->nbufs, sizeof(*pages),
-					      GFP_KERNEL);
-			if (!pages)
-				goto err_free;
-
-			for (i = 0; i < buf->nbufs; ++i)
-				pages[i] = virt_to_page(buf->page_list[i].buf);
-
-			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
-					       PAGE_KERNEL);
-			kfree(pages);
-			if (!buf->direct.buf)
-				goto err_free;
-		} else {
-			buf->direct.buf = NULL;
-		}
 	}
 
 	return 0;

@@ -726,11 +726,9 @@ static inline struct hns_roce_qp
 
 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
 {
-	u32 bits_per_long_val = BITS_PER_LONG;
 	u32 page_size = 1 << buf->page_shift;
 
-	if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
-	    buf->nbufs == 1)
+	if (buf->nbufs == 1)
 		return (char *)(buf->direct.buf) + offset;
 	else
 		return (char *)(buf->page_list[offset >> buf->page_shift].buf) +

@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
 			chunk->npages = 0;
 			chunk->nsg = 0;
+			memset(chunk->buf, 0, sizeof(chunk->buf));
 			list_add_tail(&chunk->list, &hem->chunk_list);
 		}
 
@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 		if (!buf)
 			goto fail;
 
-		sg_set_buf(mem, buf, PAGE_SIZE << order);
-		WARN_ON(mem->offset);
+		chunk->buf[chunk->npages] = buf;
 		sg_dma_len(mem) = PAGE_SIZE << order;
 
 		++chunk->npages;
@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
 	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i)
 			dma_free_coherent(hr_dev->dev,
-					  chunk->mem[i].length,
-					  lowmem_page_address(sg_page(&chunk->mem[i])),
+					  sg_dma_len(&chunk->mem[i]),
+					  chunk->buf[i],
 					  sg_dma_address(&chunk->mem[i]));
 		kfree(chunk);
 	}
@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 	struct hns_roce_hem_chunk *chunk;
 	struct hns_roce_hem_mhop mhop;
 	struct hns_roce_hem *hem;
-	struct page *page = NULL;
+	void *addr = NULL;
 	unsigned long mhop_obj = obj;
 	unsigned long obj_per_chunk;
 	unsigned long idx_offset;
 	int offset, dma_offset;
+	int length;
 	int i, j;
 	u32 hem_idx = 0;
 
@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 
 	list_for_each_entry(chunk, &hem->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i) {
+			length = sg_dma_len(&chunk->mem[i]);
 			if (dma_handle && dma_offset >= 0) {
-				if (sg_dma_len(&chunk->mem[i]) >
-				    (u32)dma_offset)
+				if (length > (u32)dma_offset)
 					*dma_handle = sg_dma_address(
 						&chunk->mem[i]) + dma_offset;
-				dma_offset -= sg_dma_len(&chunk->mem[i]);
+				dma_offset -= length;
 			}
 
-			if (chunk->mem[i].length > (u32)offset) {
-				page = sg_page(&chunk->mem[i]);
+			if (length > (u32)offset) {
+				addr = chunk->buf[i] + offset;
 				goto out;
 			}
-			offset -= chunk->mem[i].length;
+			offset -= length;
 		}
 	}
 
 out:
 	mutex_unlock(&table->mutex);
-	return page ? lowmem_page_address(page) + offset : NULL;
+	return addr;
 }
 EXPORT_SYMBOL_GPL(hns_roce_table_find);
 
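The common thread in the hns hunks is that memory returned by dma_alloc_coherent() is not guaranteed to come from the kernel linear map, so round-tripping it through virt_to_page()/sg_page() and back via lowmem_page_address() is invalid on the affected ARM IOMMU configurations. The fix records the CPU virtual address (chunk->buf[i]) next to each scatterlist entry and returns it directly. A hedged sketch of that bookkeeping, with hypothetical names:

	/* Keep both addresses that dma_alloc_coherent() hands back; never
	 * try to reconstruct one from the other through struct page. */
	struct coherent_buf {
		void       *cpu_addr;	/* for CPU access */
		dma_addr_t  dma_addr;	/* for device access */
		size_t      len;
	};

	static void *coherent_buf_offset(struct coherent_buf *b, size_t off)
	{
		return (char *)b->cpu_addr + off;
	}
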
@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
 	int npages;
 	int nsg;
 	struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
+	void *buf[HNS_ROCE_HEM_CHUNK_LEN];
 };
 
 struct hns_roce_hem {

@@ -1126,9 +1126,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 {
 	struct hns_roce_v2_mpt_entry *mpt_entry;
 	struct scatterlist *sg;
+	u64 page_addr;
 	u64 *pages;
+	int i, j;
+	int len;
 	int entry;
-	int i;
 
 	mpt_entry = mb_buf;
 	memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1186,14 +1188,20 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
 	i = 0;
 	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-		pages[i] = ((u64)sg_dma_address(sg)) >> 6;
-
-		/* Record the first 2 entry directly to MTPT table */
-		if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-			break;
-		i++;
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (j = 0; j < len; ++j) {
+			page_addr = sg_dma_address(sg) +
+				    (j << mr->umem->page_shift);
+			pages[i] = page_addr >> 6;
+
+			/* Record the first 2 entry directly to MTPT table */
+			if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+				goto found;
+			i++;
+		}
 	}
 
+found:
 	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
 	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
 		       V2_MPT_BYTE_56_PA0_H_S,

@@ -1043,7 +1043,7 @@ negotiate_done:
  * i40iw_schedule_cm_timer
  * @cm_node: connection's node
  * @sqbuf: buffer to send
- * @type: if it es send ot close
+ * @type: if it is send or close
  * @send_retrans: if rexmits to be done
  * @close_when_complete: is cm_node to be removed
  *
@@ -1067,7 +1067,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 
 	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
 	if (!new_send) {
-		i40iw_free_sqbuf(vsi, (void *)sqbuf);
+		if (type != I40IW_TIMER_TYPE_CLOSE)
+			i40iw_free_sqbuf(vsi, (void *)sqbuf);
 		return -ENOMEM;
 	}
 	new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1082,7 +1083,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 		new_send->timetosend += (HZ / 10);
 		if (cm_node->close_entry) {
 			kfree(new_send);
-			i40iw_free_sqbuf(vsi, (void *)sqbuf);
 			i40iw_pr_err("already close entry\n");
 			return -EINVAL;
 		}
@@ -2947,8 +2947,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
 		loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
 		cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
 		loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
-		loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
-		i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
 	}
 	return cm_node;
 }
@@ -3689,11 +3687,16 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	cm_id->add_ref(cm_id);
 	i40iw_add_ref(&iwqp->ibqp);
 
-	i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
-
 	attr.qp_state = IB_QPS_RTS;
 	cm_node->qhash_set = false;
 	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
 
+	cm_node->accelerated = 1;
+	status =
+		i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+	if (status)
+		i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");
+
 	if (cm_node->loopbackpartner) {
 		cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
 
@@ -3704,7 +3707,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
 	}
 
-	cm_node->accelerated = 1;
 	if (cm_node->accept_pend) {
 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
 		cm_node->accept_pend = 0;
@@ -3864,6 +3866,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		goto err;
 	}
 
+	if (cm_node->loopbackpartner) {
+		cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
+		i40iw_create_event(cm_node->loopbackpartner,
+				   I40IW_CM_EVENT_MPA_REQ);
+	}
+
 	i40iw_debug(cm_node->dev,
 		    I40IW_DEBUG_CM,
 		    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
@@ -4044,9 +4052,6 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
 	dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
 	if (iwqp->page)
 		kunmap(iwqp->page);
-	status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
-	if (status)
-		i40iw_pr_err("send cm event\n");
 
 	memset(&attr, 0, sizeof(attr));
 	attr.qp_state = IB_QPS_RTS;
@@ -4054,6 +4059,10 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
 	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
 
 	cm_node->accelerated = 1;
+	status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+				     0);
+	if (status)
+		i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");
 
 	return;
 

@@ -513,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
 
 	ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
 					  &cqp->sdbuf,
-					  128,
+					  I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
 					  I40IW_SD_BUF_ALIGNMENT);
 
 	if (ret_code)
@@ -596,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
 }
 
 /**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @wqe_idx: we index of cqp ring
+ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
+ * @cqp: pointer to CQP structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index for next WQE on CQP SQ
  */
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
+					       u64 scratch, u32 *wqe_idx)
 {
 	u64 *wqe = NULL;
-	u32 wqe_idx;
 	enum i40iw_status_code ret_code;
 
 	if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
@@ -616,20 +617,32 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
 			    cqp->sq_ring.size);
 		return NULL;
 	}
-	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
 	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
 	if (ret_code)
 		return NULL;
-	if (!wqe_idx)
+	if (!*wqe_idx)
 		cqp->polarity = !cqp->polarity;
 
-	wqe = cqp->sq_base[wqe_idx].elem;
-	cqp->scratch_array[wqe_idx] = scratch;
+	wqe = cqp->sq_base[*wqe_idx].elem;
+	cqp->scratch_array[*wqe_idx] = scratch;
 	I40IW_CQP_INIT_WQE(wqe);
 
 	return wqe;
 }
 
+/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+	u32 wqe_idx;
+
+	return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
+
 /**
  * i40iw_sc_cqp_destroy - destroy cqp during close
  * @cqp: struct for cqp hw
@@ -3587,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
 	u64 *wqe;
 	int mem_entries, wqe_entries;
 	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+	u64 offset;
+	u32 wqe_idx;
 
-	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+	wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
 	if (!wqe)
 		return I40IW_ERR_RING_FULL;
 
@@ -3601,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
 	       LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
 
 	if (mem_entries) {
-		memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
-		data = sdbuf->pa;
+		offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
+		memcpy((char *)sdbuf->va + offset, &info->entry[3],
+		       mem_entries << 4);
+		data = (u64)sdbuf->pa + offset;
 	} else {
 		data = 0;
 	}

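The sdbuf hunks close a sharing bug: the CQP SQ can have several WQEs in flight, but there was only one 128-byte scratch buffer for SD update payloads, so a later command could overwrite an earlier one before the hardware consumed it. Allocating I40IW_UPDATE_SD_BUF_SIZE * sq_size bytes and slicing by WQE index gives each slot a private region. The offset arithmetic as a standalone sketch (names mirror the defines above; sd_buf_offset is illustrative only):

	#include <stdint.h>

	#define UPDATE_SD_BUF_SIZE 128	/* I40IW_UPDATE_SD_BUF_SIZE */

	/* Byte offset of the SD scratch region owned by a given WQE slot. */
	static uint64_t sd_buf_offset(uint32_t wqe_idx)
	{
		return (uint64_t)wqe_idx * UPDATE_SD_BUF_SIZE;
	}
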
@@ -1114,7 +1114,7 @@
 #define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
 
 #define I40IWQPC_ARPIDX_SHIFT 48
-#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)
 
 #define I40IWQPC_FLOWLABEL_SHIFT 0
 #define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
@@ -1526,7 +1526,7 @@ enum i40iw_alignment {
 	I40IW_AEQ_ALIGNMENT = 0x100,
 	I40IW_CEQ_ALIGNMENT = 0x100,
 	I40IW_CQ0_ALIGNMENT = 0x100,
-	I40IW_SD_BUF_ALIGNMENT = 0x100
+	I40IW_SD_BUF_ALIGNMENT = 0x80
 };
 
 #define I40IW_WQE_SIZE_64 64
@@ -1534,6 +1534,8 @@ enum i40iw_alignment {
 #define I40IW_QP_WQE_MIN_SIZE 32
 #define I40IW_QP_WQE_MAX_SIZE 128
 
+#define I40IW_UPDATE_SD_BUF_SIZE 128
+
 #define I40IW_CQE_QTYPE_RQ 0
 #define I40IW_CQE_QTYPE_SQ 1